{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/raster3d/r3.gradient/main.c", "omp_pragma_line": "#pragma omp parallel for schedule", "context_chars": 100, "text": " == max_i - 1) {\n\n /* compute gradient */\n /* disabled openMP * (static) private (k) */\n for (k = 0; k <= j; k++) {\n Rast3d_gradient_double(&(blocks[k].input), step,\n &(blocks[k].dx), &(blocks[k].dy),\n &(blocks[k].dz));\n } #pragma omp parallel for schedule"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/raster/r.proj/main.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " does not always work,\n * segfaults in the interpolation functions\n * can happen */\nfor (col = 0; col < outcellhd.cols; col++) {\n void *obufptr =\n (void *)((const unsigned char *)obuffer + col * cell_size);\n\n double xcoord1 = xcoord2 + (col)*outcellhd.ew_res;\n double ycoord1 = ycoord2;\n\n /* project coordinates in output matrix to */\n /* coordinates in input matrix */\n if (GPJ_transform(&oproj, &iproj, &tproj, PJ_FWD, &xcoord1,\n &ycoord1, NULL) < 0) {\n G_fatal_error(_(\"Error in %s\"), \"GPJ_transform()\");\n Rast_set_null_value(obufptr, 1, cell_type);\n }\n else {\n /* convert to row/column indices of input matrix */\n\n /* column index in input matrix */\n double col_idx = (xcoord1 - incellhd.west) / incellhd.ew_res;\n\n /* row index in input matrix */\n double row_idx = (incellhd.north - ycoord1) / incellhd.ns_res;\n\n /* and resample data point */\n interpolate(ibuffer, obufptr, cell_type, col_idx, row_idx,\n &incellhd);\n }\n\n /* obufptr = G_incr_void_ptr(obufptr, cell_size); */\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": " G_math_d_x_dot_y(double *x, double *y, double *value, int rows)\n{\n int i;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * y[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": "*/\nvoid G_math_d_euclid_norm(double *x, double *value, int rows)\n{\n int i;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * x[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": "\nvoid G_math_d_asum_norm(double *x, double *value, int rows)\n{\n int i = 0;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += fabs(x[i]);\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": "void G_math_f_x_dot_y(float *x, float *y, float *value, int rows)\n{\n int i;\n\n float s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * y[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": " * */\nvoid G_math_f_euclid_norm(float *x, float *value, int rows)\n{\n int i;\n\n float s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * x[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i) reduction(+ : s)", "context_chars": 100, "text": "*\n * */\nvoid G_math_f_asum_norm(float *x, float *value, int rows)\n{\n int i;\n\n float s = 0.0;\n\nfor (i = 0; i < rows; i++) {\n s += fabs(x[i]);\n } #pragma omp parallel for schedule(static) private(i) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": "/\nvoid G_math_i_x_dot_y(int *x, int *y, double *value, int rows)\n{\n int i;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * y[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": " * */\nvoid G_math_i_euclid_norm(int *x, double *value, int rows)\n{\n int i;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += x[i] * x[i];\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+ : s)", "context_chars": 100, "text": "*\n * */\nvoid G_math_i_asum_norm(int *x, double *value, int rows)\n{\n int i;\n\n double s = 0.0;\n\nfor (i = rows - 1; i >= 0; i--) {\n s += (double)abs(x[i]);\n } #pragma omp parallel for schedule(static) reduction(+ : s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j, tmpval) \\", "context_chars": 100, "text": "le *b, int rows)\n{\n int i, j, k;\n\n double tmpval = 0.0;\n\n for (k = 0; k < rows - 1; k++) {\nshared(k, A, b, rows)\n for (i = k + 1; i < rows; i++) {\n tmpval = A[i][k] / A[k][k];\n b[i] = b[i] - tmpval * b[k];\n for (j = k + 1; j < rows; j++) {\n A[i][j] = A[i][j] - tmpval * A[k][j];\n }\n } #pragma omp parallel for schedule(static) private(i, j, tmpval) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j) shared(k, A, rows)", "context_chars": 100, "text": "n(double **A, double *b UNUSED, int rows)\n{\n\n int i, j, k;\n\n for (k = 0; k < rows - 1; k++) {\nfor (i = k + 1; i < rows; i++) {\n A[i][k] = A[i][k] / A[k][k];\n for (j = k + 1; j < rows; j++) {\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n } #pragma omp parallel for schedule(static) private(i, j) shared(k, A, rows)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c", "omp_pragma_line": "#pragma omp parallel 
for schedule(static) private(i, j, sum_2) shared(A, k) \\", "context_chars": 100, "text": "ndwidth <= 0)\n bandwidth = rows;\n\n colsize = bandwidth;\n\n for (k = 0; k < rows; k++) {\nreduction(+ : sum_1)\n for (j = 0; j < k; j++) {\n sum_1 += A[k][j] * A[k][j];\n } #pragma omp parallel for schedule(static) private(i, j, sum_2) shared(A, k) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j, sum_2) \\", "context_chars": 100, "text": " colsize = rows;\n }\n else {\n colsize = k + bandwidth;\n }\n\nshared(A, k, sum_1, colsize)\n\n for (i = k + 1; i < colsize; i++) {\n sum_2 = 0.0;\n for (j = 0; j < k; j++) {\n sum_2 += A[i][j] * A[k][j];\n }\n A[i][k] = (A[i][k] - sum_2) / A[k][k];\n } #pragma omp parallel for schedule(static) private(i, j, sum_2) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, k) shared(A, rows)", "context_chars": 100, "text": " A[k][k];\n }\n }\n /* we need to copy the lower triangle matrix to the upper triangle */\nfor (k = 0; k < rows; k++) {\n for (i = k + 1; i < rows; i++) {\n A[k][i] = A[i][k];\n }\n } #pragma omp parallel for schedule(static) private(i, k) shared(A, rows)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct_cholesky_band.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(j, k, end, sum) \\", "context_chars": 100, "text": " G_fatal_error(_(\"Decomposition failed at row %i and col %i\"), i, 0);\n T[i][0] = sqrt(sum);\n\nshared(A, T, i, bandwidth)\n for (j = 1; j < bandwidth; j++) {\n sum = A[i][j];\n end = ((bandwidth - j) < (i + 1) ? 
(bandwidth - j) : (i + 1));\n for (k = 1; k < end; k++)\n sum -= T[i - k][k] * T[i - k][j + k];\n T[i][j] = sum / T[i][0];\n } #pragma omp parallel for schedule(static) private(j, k, end, sum) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/sparse_matrix.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j)", "context_chars": 100, "text": "ows)\n{\n int i;\n unsigned int j;\n\n double **A = NULL;\n\n A = G_alloc_matrix(rows, rows);\n\nfor (i = 0; i < rows; i++) {\n for (j = 0; j < Asp[i]->cols; j++) {\n A[i][Asp[i]->index[j]] = Asp[i]->values[j];\n }\n } #pragma omp parallel for schedule(static) private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/sparse_matrix.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j, nonull, count)", "context_chars": 100, "text": " int nonull, count = 0;\n\n G_math_spvector **Asp = NULL;\n\n Asp = G_math_alloc_spmatrix(rows);\n\nfor (i = 0; i < rows; i++) {\n nonull = 0;\n /*Count the number of non zero entries */\n for (j = 0; j < rows; j++) {\n if (A[i][j] > epsilon)\n nonull++;\n }\n /*Allocate the sparse vector and insert values */\n G_math_spvector *v = G_math_alloc_spvector(nonull);\n\n count = 0;\n for (j = 0; j < rows; j++) {\n if (A[i][j] > epsilon) {\n v->index[count] = j;\n v->values[count] = A[i][j];\n count++;\n }\n }\n /*Add vector to sparse matrix */\n G_math_add_spvector(Asp, v, i);\n } #pragma omp parallel for schedule(static) private(i, j, nonull, count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/lu.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, big, temp) shared(n, a, vv,", "context_chars": 100, "text": "ector(n);\n *d = 1.0;\n /* this pragma works, but doesn't really help speed things up */\n /* * is_singular) */\n for (i = 0; i < n; i++) {\n big = 0.0;\n for (j = 0; j < n; j++)\n if ((temp = fabs(a[i][j])) > big)\n big = temp;\n\n if (big == 0.0) {\n is_singular = TRUE;\n break;\n }\n\n vv[i] = 1.0 / big;\n } #pragma omp parallel for private(i, j, big, temp) shared(n, a, vv,"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/lu.c", "omp_pragma_line": "#pragma omp parallel for private(i, k, sum, dum) shared(j, n, a, vv, big, imax)", "context_chars": 100, "text": "}\n\n big = 0.0;\n /* not very efficient, but this pragma helps speed things up a bit */\nfor (i = j; i < n; i++) {\n sum = a[i][j];\n for (k = 0; k < j; k++)\n sum -= a[i][k] * a[k][j];\n a[i][j] = sum;\n if ((dum = vv[i] * fabs(sum)) >= big) {\n big = dum;\n imax = i;\n }\n } #pragma omp parallel for private(i, k, sum, dum) shared(j, n, a, vv, big, imax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_krylov.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j, sum) \\", "context_chars": 100, "text": " double sum;\n\n assert(rows >= 0);\n\n Msp = G_math_alloc_spmatrix(rows);\n\n if (A != NULL) {\nshared(A, Msp, rows, cols, prec)\n for (i = 0; i < (unsigned int)rows; i++) {\n G_math_spvector *spvect = G_math_alloc_spvector(1);\n\n switch (prec) {\n case G_MATH_ROWSCALE_EUKLIDNORM_PRECONDITION:\n sum = 0;\n for (j = 0; j < cols; j++)\n sum += A[i][j] * A[i][j];\n spvect->values[0] = 1.0 / sqrt(sum);\n break;\n case G_MATH_ROWSCALE_ABSSUMNORM_PRECONDITION:\n sum = 0;\n for (j = 0; j < cols; j++)\n sum += fabs(A[i][j]);\n 
spvect->values[0] = 1.0 / (sum);\n break;\n case G_MATH_DIAGONAL_PRECONDITION:\n default:\n spvect->values[0] = 1.0 / A[i][i];\n break;\n }\n\n spvect->index[0] = i;\n spvect->cols = 1;\n ;\n G_math_add_spvector(Msp, spvect, i);\n } #pragma omp parallel for schedule(static) private(i, j, sum) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_krylov.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(i, j, sum) \\", "context_chars": 100, "text": "cols = 1;\n ;\n G_math_add_spvector(Msp, spvect, i);\n }\n }\n else {\nshared(Asp, Msp, rows, cols, prec)\n for (i = 0; i < (unsigned int)rows; i++) {\n G_math_spvector *spvect = G_math_alloc_spvector(1);\n\n switch (prec) {\n case G_MATH_ROWSCALE_EUKLIDNORM_PRECONDITION:\n sum = 0;\n for (j = 0; j < Asp[i]->cols; j++)\n sum += Asp[i]->values[j] * Asp[i]->values[j];\n spvect->values[0] = 1.0 / sqrt(sum);\n break;\n case G_MATH_ROWSCALE_ABSSUMNORM_PRECONDITION:\n sum = 0;\n for (j = 0; j < Asp[i]->cols; j++)\n sum += fabs(Asp[i]->values[j]);\n spvect->values[0] = 1.0 / (sum);\n break;\n case G_MATH_DIAGONAL_PRECONDITION:\n default:\n for (j = 0; j < Asp[i]->cols; j++)\n if (i == Asp[i]->index[j])\n spvect->values[0] = 1.0 / Asp[i]->values[j];\n break;\n }\n\n spvect->index[0] = i;\n spvect->cols = 1;\n ;\n G_math_add_spvector(Msp, spvect, i);\n } #pragma omp parallel for schedule(static) private(i, j, sum) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/n_les_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, pos, count) schedule(static)", "context_chars": 100, "text": "_assemble_les_2d: starting the parallel assemble loop\");\n\n /* Assemble the matrix in parallel */\nfor (count = 0; count < cell_type_count; count++) {\n i = index_ij[count][0];\n j = index_ij[count][1];\n\n /*create the entries for the */\n N_data_star *items = call->callback(data, geom, i, j);\n\n /* we need a sparse vector pointer anytime */\n G_math_spvector *spvect = NULL;\n\n /*allocate a sprase vector */\n if (les_type == N_SPARSE_LES) {\n spvect = G_math_alloc_spvector(items->count);\n }\n /* initial conditions */\n les->x[count] = N_get_array_2d_d_value(start_val, i, j);\n\n /* the entry in the vector b */\n les->b[count] = items->V;\n\n /* pos describes the position in the sparse vector.\n * the first entry is always the diagonal entry of the matrix*/\n pos = 0;\n\n if (les_type == N_SPARSE_LES) {\n spvect->index[pos] = count;\n spvect->values[pos] = items->C;\n }\n else {\n les->A[count][count] = items->C;\n }\n /* western neighbour, entry is col - 1 */\n if (i > 0) {\n pos = make_les_entry_2d(i, j, -1, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->W,\n cell_type);\n }\n /* eastern neighbour, entry col + 1 */\n if (i < geom->cols - 1) {\n pos = make_les_entry_2d(i, j, 1, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->E,\n cell_type);\n }\n /* northern neighbour, entry row - 1 */\n if (j > 0) {\n pos = make_les_entry_2d(i, j, 0, -1, count, pos, les, spvect,\n cell_count, status, start_val, items->N,\n cell_type);\n }\n /* southern neighbour, entry row + 1 */\n if (j < geom->rows - 1) {\n pos = make_les_entry_2d(i, j, 0, 1, count, pos, les, spvect,\n cell_count, status, start_val, items->S,\n cell_type);\n }\n /*in case of a nine point star, we have additional entries */\n if (items->type == N_9_POINT_STAR) {\n /* north-western neighbour, entry is col - 1 row - 1 */\n if (i > 0 && j 
> 0) {\n pos = make_les_entry_2d(i, j, -1, -1, count, pos, les, spvect,\n cell_count, status, start_val,\n items->NW, cell_type);\n }\n /* north-eastern neighbour, entry col + 1 row - 1 */\n if (i < geom->cols - 1 && j > 0) {\n pos = make_les_entry_2d(i, j, 1, -1, count, pos, les, spvect,\n cell_count, status, start_val,\n items->NE, cell_type);\n }\n /* south-western neighbour, entry is col - 1 row + 1 */\n if (i > 0 && j < geom->rows - 1) {\n pos = make_les_entry_2d(i, j, -1, 1, count, pos, les, spvect,\n cell_count, status, start_val,\n items->SW, cell_type);\n }\n /* south-eastern neighbour, entry col + 1 row + 1 */\n if (i < geom->cols - 1 && j < geom->rows - 1) {\n pos = make_les_entry_2d(i, j, 1, 1, count, pos, les, spvect,\n cell_count, status, start_val,\n items->SE, cell_type);\n }\n }\n\n /*How many entries in the les */\n if (les->type == N_SPARSE_LES) {\n spvect->cols = pos + 1;\n G_math_add_spvector(les->Asp, spvect, count);\n }\n\n if (items)\n G_free(items);\n } #pragma omp parallel for private(i, j, pos, count) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/n_les_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k, pos, count) schedule(static)", "context_chars": 100, "text": " }\n }\n }\n\n G_debug(2, \"N_assemble_les_3d: starting the parallel assemble loop\");\n\nfor (count = 0; count < cell_type_count; count++) {\n i = index_ij[count][0];\n j = index_ij[count][1];\n k = index_ij[count][2];\n\n /*create the entries for the */\n N_data_star *items = call->callback(data, geom, i, j, k);\n\n G_math_spvector *spvect = NULL;\n\n /*allocate a sprase vector */\n if (les_type == N_SPARSE_LES)\n spvect = G_math_alloc_spvector(items->count);\n /* initial conditions */\n\n les->x[count] = N_get_array_3d_d_value(start_val, i, j, k);\n\n /* the entry in the vector b */\n les->b[count] = items->V;\n\n /* pos describes the position in the sparse vector.\n * the first entry is always the diagonal entry of the matrix*/\n pos = 0;\n\n if (les_type == N_SPARSE_LES) {\n spvect->index[pos] = count;\n spvect->values[pos] = items->C;\n }\n else {\n les->A[count][count] = items->C;\n }\n /* western neighbour, entry is col - 1 */\n if (i > 0) {\n pos = make_les_entry_3d(i, j, k, -1, 0, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->W,\n cell_type);\n }\n /* eastern neighbour, entry col + 1 */\n if (i < geom->cols - 1) {\n pos = make_les_entry_3d(i, j, k, 1, 0, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->E,\n cell_type);\n }\n /* northern neighbour, entry row -1 */\n if (j > 0) {\n pos = make_les_entry_3d(i, j, k, 0, -1, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->N,\n cell_type);\n }\n /* southern neighbour, entry row +1 */\n if (j < geom->rows - 1) {\n pos = make_les_entry_3d(i, j, k, 0, 1, 0, count, pos, les, spvect,\n cell_count, status, start_val, items->S,\n cell_type);\n }\n /*only for a 7 star entry needed */\n if (items->type == N_7_POINT_STAR || items->type == N_27_POINT_STAR) {\n /* the upper cell (top), entry depth + 1 */\n if (k < geom->depths - 1) {\n pos = make_les_entry_3d(i, j, k, 0, 0, 1, count, pos, les,\n spvect, cell_count, status, start_val,\n items->T, cell_type);\n }\n /* the lower cell (bottom), entry depth - 1 */\n if (k > 0) {\n pos = make_les_entry_3d(i, j, k, 0, 0, -1, count, pos, les,\n spvect, cell_count, status, start_val,\n items->B, cell_type);\n }\n }\n\n /*How many entries in the les */\n if (les->type == 
N_SPARSE_LES) {\n spvect->cols = pos + 1;\n G_math_add_spvector(les->Asp, spvect, count);\n }\n\n if (items)\n G_free(items);\n } #pragma omp parallel for private(i, j, k, pos, count) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "*data;\n int i, j;\n\n data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE);\n\nfor (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n N_put_array_2d_c_value(data, i, j, 1);\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "data;\n int i, j;\n\n data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE);\n\nfor (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n N_put_array_2d_d_value(data, i, j, (double)i * j);\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,\n 1, FCELL_TYPE);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS; k++) {\n for (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n N_put_array_3d_f_value(data, i, j, k, 1.0);\n }\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,\n 1, DCELL_TYPE);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS; k++)\n for (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n N_put_array_3d_f_value(data, i, j, k, (float)i * j * k);\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gwflow.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "OLS_LOCAL, TEST_N_NUM_ROWS_LOCAL,\n TEST_N_NUM_DEPTHS_LOCAL, 1, 1);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++)\n for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {\n for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {\n\n if (j == 0) {\n N_put_array_3d_d_value(data->phead, i, j, k, 50);\n N_put_array_3d_d_value(data->phead_start, i, j, k, 50);\n N_put_array_3d_d_value(data->status, i, j, k, 2);\n }\n else {\n\n N_put_array_3d_d_value(data->phead, i, j, k, 40);\n N_put_array_3d_d_value(data->phead_start, i, j, k, 40);\n N_put_array_3d_d_value(data->status, i, j, k, 1);\n }\n N_put_array_3d_d_value(data->hc_x, i, j, k, 0.0001);\n N_put_array_3d_d_value(data->hc_y, i, j, k, 0.0001);\n N_put_array_3d_d_value(data->hc_z, i, j, k, 0.0001);\n N_put_array_3d_d_value(data->q, i, j, k, 0.0);\n N_put_array_3d_d_value(data->s, i, j, k, 0.001);\n N_put_array_2d_d_value(data->r, i, j, 0.0);\n N_put_array_3d_d_value(data->nf, i, j, k, 0.1);\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gwflow.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "wflow_data2d(TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL,\n 1, 1);\n\nfor (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {\n for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {\n\n if (j == 0) {\n N_put_array_2d_d_value(data->phead, i, j, 50);\n N_put_array_2d_d_value(data->phead_start, i, j, 50);\n N_put_array_2d_d_value(data->status, i, j, 2);\n }\n else {\n\n N_put_array_2d_d_value(data->phead, i, j, 40);\n N_put_array_2d_d_value(data->phead_start, i, j, 40);\n N_put_array_2d_d_value(data->status, i, j, 1);\n }\n N_put_array_2d_d_value(data->hc_x, i, j, 30.0001);\n N_put_array_2d_d_value(data->hc_y, i, j, 30.0001);\n N_put_array_2d_d_value(data->q, i, j, 0.0);\n N_put_array_2d_d_value(data->s, i, j, 0.001);\n N_put_array_2d_d_value(data->r, i, j, 0.0);\n N_put_array_2d_d_value(data->nf, i, j, 0.1);\n N_put_array_2d_d_value(data->top, i, j, 20.0);\n N_put_array_2d_d_value(data->bottom, i, j, 0.0);\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(cols, rows, type, a) \\", "context_chars": 100, "text": " int i, j, res = 0;\n\n rows = a->rows;\n cols = a->cols;\n type = N_get_array_2d_type(a);\n\nreduction(+ : res)\n for (j = 0; j < rows; j++) {\n for (i = 0; i < cols; i++) {\n if (type == CELL_TYPE) {\n N_put_array_2d_c_value(a, i, j, (CELL)i * (CELL)j);\n if (N_get_array_2d_c_value(a, i, j) != (CELL)i * (CELL)j)\n res++;\n }\n if (type == FCELL_TYPE) {\n N_put_array_2d_f_value(a, i, j, (FCELL)i * (FCELL)j);\n if (N_get_array_2d_f_value(a, i, j) != (FCELL)i * (FCELL)j)\n res++;\n }\n if (type == DCELL_TYPE) {\n N_put_array_2d_d_value(a, i, j, (DCELL)i * (DCELL)j);\n if (N_get_array_2d_d_value(a, i, j) != (DCELL)i * (DCELL)j)\n res++;\n }\n }\n } #pragma omp parallel for private(i, j) shared(cols, rows, type, a) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(rows, cols, a) reduction(+ : res)", "context_chars": 100, "text": "array_2d *a)\n{\n int rows, cols;\n int i, j, res = 0;\n\n cols = a->cols;\n rows = a->rows;\n\nfor (j = 0; j < rows; j++) {\n for (i = 0; i < cols; i++) {\n N_put_array_2d_value_null(a, i, j);\n if (!N_is_array_2d_value_null(a, i, j))\n res++;\n }\n } #pragma omp parallel for private(i, j) shared(rows, cols, a) reduction(+ : res)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(cols, rows, type, a, b) \\", "context_chars": 100, "text": " int i, j, res = 0;\n\n cols = a->cols;\n rows = a->rows;\n type = N_get_array_2d_type(a);\n\nreduction(+ : res)\n for (j = 0; j < rows; j++) {\n for (i = 0; i < cols; i++) {\n if (type == CELL_TYPE) {\n if (N_get_array_2d_c_value(a, i, j) !=\n N_get_array_2d_c_value(b, i, j))\n res++;\n }\n if (type == FCELL_TYPE) {\n if (N_get_array_2d_f_value(a, i, j) !=\n N_get_array_2d_f_value(b, i, j))\n res++;\n }\n if (type == DCELL_TYPE) {\n if (N_get_array_2d_d_value(a, i, j) !=\n N_get_array_2d_d_value(b, i, j))\n res++;\n }\n }\n } #pragma omp parallel for private(i, j) shared(cols, rows, 
type, a, b) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(depths, rows, cols, type, a) \\", "context_chars": 100, "text": " cols = a->cols;\n rows = a->rows;\n depths = a->depths;\n type = N_get_array_3d_type(a);\n\nreduction(+ : res)\n for (k = 0; k < depths; k++) {\n for (j = 0; j < rows; j++) {\n for (i = 0; i < cols; i++) {\n if (type == FCELL_TYPE) {\n N_put_array_3d_f_value(a, i, j, k,\n (float)i * (float)j * (float)k);\n if (N_get_array_3d_f_value(a, i, j, k) !=\n (float)i * (float)j * (float)k)\n res++;\n }\n if (type == DCELL_TYPE) {\n N_put_array_3d_d_value(a, i, j, k,\n (double)i * (double)j * (double)k);\n if (N_get_array_3d_d_value(a, i, j, k) !=\n (double)i * (double)j * (double)k)\n res++;\n }\n }\n }\n } #pragma omp parallel for private(i, j, k) shared(depths, rows, cols, type, a) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(cols, rows, depths, type, a) \\", "context_chars": 100, "text": " cols = a->cols;\n rows = a->rows;\n depths = a->depths;\n type = N_get_array_3d_type(a);\n\nreduction(+ : res)\n for (k = 0; k < depths; k++) {\n for (j = 0; j < rows; j++) {\n for (i = 0; i < cols; i++) {\n N_put_array_3d_value_null(a, i, j, k);\n if (!N_is_array_3d_value_null(a, i, j, k))\n res++;\n }\n }\n } #pragma omp parallel for private(i, j, k) shared(cols, rows, depths, type, a) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) \\", "context_chars": 100, "text": " rows = a->rows;\n cols = a->cols;\n depths = a->depths;\n type = N_get_array_3d_type(a);\n\nshared(depths, rows, cols, type, a, b) reduction(+ : res)\n for (k = 0; k < depths; k++) {\n for (i = 0; i < rows; i++) {\n for (j = 0; j < cols; j++) {\n if (type == FCELL_TYPE) {\n if (N_get_array_3d_f_value(a, i, j, k) !=\n N_get_array_3d_f_value(b, i, j, k))\n res++;\n }\n if (type == DCELL_TYPE) {\n if (N_get_array_3d_d_value(a, i, j, k) !=\n N_get_array_3d_d_value(b, i, j, k))\n res++;\n }\n }\n }\n } #pragma omp parallel for private(i, j, k) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_solute_transport.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "_transport_data3d(\n TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL, TEST_N_NUM_DEPTHS_LOCAL);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++)\n for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {\n for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {\n\n if (j == 0) {\n N_put_array_3d_d_value(data->c, i, j, k, 1);\n N_put_array_3d_d_value(data->c_start, i, j, k, 1);\n N_put_array_3d_d_value(data->status, i, j, k, 3);\n }\n else {\n\n N_put_array_3d_d_value(data->c, i, j, k, 0);\n N_put_array_3d_d_value(data->c_start, i, j, k, 0);\n N_put_array_3d_d_value(data->status, i, j, k, 1);\n }\n N_put_array_3d_d_value(data->diff_x, i, j, k, 0.000001);\n N_put_array_3d_d_value(data->diff_y, i, j, k, 0.000001);\n N_put_array_3d_d_value(data->diff_z, i, j, k, 0.000001);\n N_put_array_3d_d_value(data->q, i, j, k, 0.0);\n N_put_array_3d_d_value(data->cs, i, j, k, 0.0);\n N_put_array_3d_d_value(data->R, i, j, k, 1.0);\n N_put_array_3d_d_value(data->nf, i, j, k, 0.1);\n 
if (j == 1 && i == 1 && k == 1)\n N_put_array_3d_d_value(data->cs, i, j, k, 5.0);\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_solute_transport.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "t_data2d(TEST_N_NUM_COLS_LOCAL,\n TEST_N_NUM_ROWS_LOCAL);\n\nfor (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {\n for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {\n\n if (j == 0) {\n N_put_array_2d_d_value(data->c, i, j, 0);\n N_put_array_2d_d_value(data->c_start, i, j, 0);\n N_put_array_2d_d_value(data->status, i, j, 2);\n }\n else {\n\n N_put_array_2d_d_value(data->c, i, j, 0);\n N_put_array_2d_d_value(data->c_start, i, j, 0);\n N_put_array_2d_d_value(data->status, i, j, 1);\n }\n N_put_array_2d_d_value(data->diff_x, i, j, 0.000001);\n N_put_array_2d_d_value(data->diff_y, i, j, 0.000001);\n N_put_array_2d_d_value(data->cs, i, j, 0.0);\n N_put_array_2d_d_value(data->R, i, j, 1.0);\n N_put_array_2d_d_value(data->q, i, j, 0.0);\n N_put_array_2d_d_value(data->nf, i, j, 0.1);\n N_put_array_2d_d_value(data->top, i, j, 20.0);\n N_put_array_2d_d_value(data->bottom, i, j, 0.0);\n if (j == 1 && i == 1)\n N_put_array_2d_d_value(data->cs, i, j, 1.0);\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "*data;\n int i, j;\n\n data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE);\n\nfor (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n\n if (j == 1) {\n N_put_array_2d_c_value(data, i, j, 2);\n }\n else {\n N_put_array_2d_c_value(data, i, j, 1);\n }\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(data)", "context_chars": 100, "text": "data;\n int i, j;\n\n data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE);\n\nfor (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n\n if (j == 1) {\n N_put_array_2d_d_value(data, i, j, 50);\n }\n else {\n N_put_array_2d_d_value(data, i, j, 1);\n }\n }\n } #pragma omp parallel for private(i, j) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,\n 1, FCELL_TYPE);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS; k++)\n for (j = 0; j < TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n\n if (i == 0 && j == 1) {\n N_put_array_3d_f_value(data, i, j, k, 2.0);\n }\n else {\n\n N_put_array_3d_f_value(data, i, j, k, 1.0);\n }\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(data)", "context_chars": 100, "text": "d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,\n 1, DCELL_TYPE);\n\nfor (k = 0; k < TEST_N_NUM_DEPTHS; k++)\n for (j = 0; j < 
TEST_N_NUM_ROWS; j++) {\n for (i = 0; i < TEST_N_NUM_COLS; i++) {\n\n if (i == 0 && j == 1) {\n N_put_array_3d_f_value(data, i, j, k, 50);\n }\n else {\n\n N_put_array_3d_f_value(data, i, j, k, 1);\n }\n }\n } #pragma omp parallel for private(i, j, k) shared(data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_les.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(les)", "context_chars": 100, "text": "lloc_les(TEST_N_NUM_ROWS, N_SPARSE_LES);\n\n G_message(\"\\t * testing les creation in parallel\\n\");\nfor (i = 0; i < TEST_N_NUM_ROWS; i++) {\n for (j = 0; j < TEST_N_NUM_ROWS; j++) {\n if (i != j)\n les->A[i][j] = 2e-2;\n les->A[i][i] = -1e2 - i;\n }\n les->x[i] = 273.15 + i;\n les->b[i] = 1e2 - i;\n } #pragma omp parallel for private(i, j) shared(les)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_les.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(sples, spvector)", "context_chars": 100, "text": "->A[i][i] = -1e2 - i;\n }\n les->x[i] = 273.15 + i;\n les->b[i] = 1e2 - i;\n }\n\nfor (i = 0; i < TEST_N_NUM_ROWS; i++) {\n spvector = G_math_alloc_spvector(TEST_N_NUM_ROWS);\n\n for (j = 0; j < TEST_N_NUM_ROWS; j++)\n if (i != j)\n spvector->index[j] = 2e-2;\n\n spvector->index[0] = i;\n spvector->values[0] = -1e2 - i;\n\n G_math_add_spvector(sples->Asp, spvector, i);\n sples->x[i] = 273.15 + i;\n sples->b[i] = 1e2 - i;\n } #pragma omp parallel for private(i, j) shared(sples, spvector)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c", "omp_pragma_line": "#pragma omp parallel for shared(layer)", "context_chars": 100, "text": "tf(stderr,\"Error: Allocating the layer memory\\n\");\n\t\texit( EXIT_FAILURE );\n\t}\n}\n#pragma omp barrier\nfor( k=0; k layer[k-1] && layer[k] > layer[k+1] ) {\n\t\t\t\tif ( layer[k] > maximos[i] ) {\n\t\t\t\t\tmaximos[i] = layer[k];\n\t\t\t\t\tposiciones[i] = k;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for shared(layer)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c", "omp_pragma_line": "#pragma omp parallel for shared(layer_copy)", "context_chars": 100, "text": " omp barrier\n#pragma omp parallel for shared(layer)\n\tfor( k=0; kfor( k=0; k layer[k-1] && layer[k] > layer[k+1] ) {\n\t\t\t\tif ( layer[k] > maximos[i] ) {\n\t\t\t\t\tmaximos[i] = layer[k];\n\t\t\t\t\tposiciones[i] = k;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for shared(layer_copy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c", "omp_pragma_line": "#pragma omp parallel for shared(layer, layer_copy)", "context_chars": 100, "text": "/* 4.2. Relajacion entre tormentas de particulas */\n\t\t/* 4.2.1. Copiar valores a capa auxiliar */\n//for( k=0; k #pragma omp parallel for shared(layer, layer_copy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c", "omp_pragma_line": "#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)", "context_chars": 100, "text": " layer[k];\n\n\t\t/* 4.2.2. 
Actualizar capa, menos los extremos, usando valores del array auxiliar */\n//for( k=1; k #pragma omp parallel for shared(layer, layer_copy) reduction(/:division)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": " Index_t numElem)\n{\n //\n // pull in the stresses appropriate to the hydro integration\n //\n\nfor (Index_t i = 0 ; i < numElem ; ++i){\n sigxx[i] = sigyy[i] = sigzz[i] = - domain.p(i) - domain.q(i) ;\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "_t>(numElem8) ;\n fz_elem = Allocate(numElem8) ;\n }\n // loop over all elements\n\nfor( Index_t k=0 ; k 1) {\n // Eliminate thread writing conflicts at the nodes by giving\n // each element its own copy to write to\n SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],\n &fx_elem[k*8],\n &fy_elem[k*8],\n &fz_elem[k*8] ) ;\n }\n else {\n SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],\n fx_local, fy_local, fz_local ) ;\n\n // copy nodal force contributions to global force arrray.\n for( Index_t lnode=0 ; lnode<8 ; ++lnode ) {\n Index_t gnode = elemToNode[lnode];\n domain.fx(gnode) += fx_local[lnode];\n domain.fy(gnode) += fy_local[lnode];\n domain.fz(gnode) += fz_local[lnode];\n }\n }\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "need to copy the data out of the temporary\n // arrays used above into the final forces field\nfor( Index_t gnode=0 ; gnode #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem, hourg)", "context_chars": 100, "text": "\n /*************************************************/\n /* compute the hourglass modes */\n\n\nfor(Index_t i2=0;i2 1) {\n fx_local = &fx_elem[i3] ;\n fx_local[0] = hgfx[0];\n fx_local[1] = hgfx[1];\n fx_local[2] = hgfx[2];\n fx_local[3] = hgfx[3];\n fx_local[4] = hgfx[4];\n fx_local[5] = hgfx[5];\n fx_local[6] = hgfx[6];\n fx_local[7] = hgfx[7];\n\n fy_local = &fy_elem[i3] ;\n fy_local[0] = hgfy[0];\n fy_local[1] = hgfy[1];\n fy_local[2] = hgfy[2];\n fy_local[3] = hgfy[3];\n fy_local[4] = hgfy[4];\n fy_local[5] = hgfy[5];\n fy_local[6] = hgfy[6];\n fy_local[7] = hgfy[7];\n\n fz_local = &fz_elem[i3] ;\n fz_local[0] = hgfz[0];\n fz_local[1] = hgfz[1];\n fz_local[2] = hgfz[2];\n fz_local[3] = hgfz[3];\n fz_local[4] = hgfz[4];\n fz_local[5] = hgfz[5];\n fz_local[6] = hgfz[6];\n fz_local[7] = hgfz[7];\n }\n else {\n domain.fx(n0si2) += hgfx[0];\n domain.fy(n0si2) += hgfy[0];\n domain.fz(n0si2) += hgfz[0];\n\n domain.fx(n1si2) += hgfx[1];\n domain.fy(n1si2) += hgfy[1];\n domain.fz(n1si2) += hgfz[1];\n\n domain.fx(n2si2) += hgfx[2];\n domain.fy(n2si2) += hgfy[2];\n domain.fz(n2si2) += hgfz[2];\n\n domain.fx(n3si2) += hgfx[3];\n domain.fy(n3si2) += hgfy[3];\n domain.fz(n3si2) += hgfz[3];\n\n domain.fx(n4si2) += hgfx[4];\n domain.fy(n4si2) += hgfy[4];\n domain.fz(n4si2) += hgfz[4];\n\n 
domain.fx(n5si2) += hgfx[5];\n domain.fy(n5si2) += hgfy[5];\n domain.fz(n5si2) += hgfz[5];\n\n domain.fx(n6si2) += hgfx[6];\n domain.fy(n6si2) += hgfy[6];\n domain.fz(n6si2) += hgfz[6];\n\n domain.fx(n7si2) += hgfx[7];\n domain.fy(n7si2) += hgfy[7];\n domain.fz(n7si2) += hgfz[7];\n }\n } #pragma omp parallel for firstprivate(numElem, hourg)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "if (numthreads > 1) {\n // Collect the data from the local arrays into the final force arrays\nfor( Index_t gnode=0 ; gnode #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "_t>(numElem8) ;\n Real_t *z8n = Allocate(numElem8) ;\n\n /* start loop over elements */\nfor (Index_t i=0 ; i #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": " determ, numElem,\n domain.numNode()) ;\n\n // check for negative element volume\nfor ( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": " domain.sizeX() + 1, domain.sizeY() + 1, domain.sizeZ() + 1,\n true, false) ;\n#endif \n\nfor (Index_t i=0; i #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "*************/\n\n static inline\nvoid CalcAccelerationForNodes(Domain &domain, Index_t numNode)\n{\n\nfor (Index_t i = 0; i < numNode; ++i) {\n domain.xdd(i) = domain.fx(i) / domain.nodalMass(i);\n domain.ydd(i) = domain.fy(i) / domain.nodalMass(i);\n domain.zdd(i) = domain.fz(i) / domain.nodalMass(i);\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "lcVelocityForNodes(Domain &domain, const Real_t dt, const Real_t u_cut,\n Index_t numNode)\n{\n\nfor ( Index_t i = 0 ; i < numNode ; ++i )\n {\n Real_t xdtmp, ydtmp, zdtmp ;\n\n xdtmp = domain.xd(i) + domain.xdd(i) * dt ;\n if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);\n domain.xd(i) = xdtmp ;\n\n ydtmp = domain.yd(i) + domain.ydd(i) * dt ;\n if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);\n domain.yd(i) = ydtmp ;\n\n zdtmp = domain.zd(i) + domain.zdd(i) * dt ;\n if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);\n domain.zd(i) = zdtmp ;\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "*/\n\n static inline\nvoid CalcPositionForNodes(Domain &domain, const 
Real_t dt, Index_t numNode)\n{\nfor ( Index_t i = 0 ; i < numNode ; ++i )\n {\n domain.x(i) += domain.xd(i) * dt ;\n domain.y(i) += domain.yd(i) * dt ;\n domain.z(i) += domain.zd(i) * dt ;\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem, deltaTime)", "context_chars": 100, "text": "domain, Real_t *vnew, \n Real_t deltaTime, Index_t numElem )\n{\n\n // loop over all elements\nfor( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem, deltaTime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "ltatime, numElem) ;\n\n // element loop to do some stuff not included in the elemlib function.\nfor ( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "notonicQGradientsForElems(Domain& domain, Real_t vnew[])\n{\n Index_t numElem = domain.numElem();\n\nfor (Index_t i = 0 ; i < numElem ; ++i ) {\n const Real_t ptiny = Real_t(1.e-36) ;\n Real_t ax,ay,az ;\n Real_t dxv,dyv,dzv ;\n\n const Index_t *elemToNode = domain.nodelist(i);\n Index_t n0 = elemToNode[0] ;\n Index_t n1 = elemToNode[1] ;\n Index_t n2 = elemToNode[2] ;\n Index_t n3 = elemToNode[3] ;\n Index_t n4 = elemToNode[4] ;\n Index_t n5 = elemToNode[5] ;\n Index_t n6 = elemToNode[6] ;\n Index_t n7 = elemToNode[7] ;\n\n Real_t x0 = domain.x(n0) ;\n Real_t x1 = domain.x(n1) ;\n Real_t x2 = domain.x(n2) ;\n Real_t x3 = domain.x(n3) ;\n Real_t x4 = domain.x(n4) ;\n Real_t x5 = domain.x(n5) ;\n Real_t x6 = domain.x(n6) ;\n Real_t x7 = domain.x(n7) ;\n\n Real_t y0 = domain.y(n0) ;\n Real_t y1 = domain.y(n1) ;\n Real_t y2 = domain.y(n2) ;\n Real_t y3 = domain.y(n3) ;\n Real_t y4 = domain.y(n4) ;\n Real_t y5 = domain.y(n5) ;\n Real_t y6 = domain.y(n6) ;\n Real_t y7 = domain.y(n7) ;\n\n Real_t z0 = domain.z(n0) ;\n Real_t z1 = domain.z(n1) ;\n Real_t z2 = domain.z(n2) ;\n Real_t z3 = domain.z(n3) ;\n Real_t z4 = domain.z(n4) ;\n Real_t z5 = domain.z(n5) ;\n Real_t z6 = domain.z(n6) ;\n Real_t z7 = domain.z(n7) ;\n\n Real_t xv0 = domain.xd(n0) ;\n Real_t xv1 = domain.xd(n1) ;\n Real_t xv2 = domain.xd(n2) ;\n Real_t xv3 = domain.xd(n3) ;\n Real_t xv4 = domain.xd(n4) ;\n Real_t xv5 = domain.xd(n5) ;\n Real_t xv6 = domain.xd(n6) ;\n Real_t xv7 = domain.xd(n7) ;\n\n Real_t yv0 = domain.yd(n0) ;\n Real_t yv1 = domain.yd(n1) ;\n Real_t yv2 = domain.yd(n2) ;\n Real_t yv3 = domain.yd(n3) ;\n Real_t yv4 = domain.yd(n4) ;\n Real_t yv5 = domain.yd(n5) ;\n Real_t yv6 = domain.yd(n6) ;\n Real_t yv7 = domain.yd(n7) ;\n\n Real_t zv0 = domain.zd(n0) ;\n Real_t zv1 = domain.zd(n1) ;\n Real_t zv2 = domain.zd(n2) ;\n Real_t zv3 = domain.zd(n3) ;\n Real_t zv4 = domain.zd(n4) ;\n Real_t zv5 = domain.zd(n5) ;\n Real_t zv6 = domain.zd(n6) ;\n Real_t zv7 = domain.zd(n7) ;\n\n Real_t vol = domain.volo(i)*vnew[i] ;\n Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;\n\n Real_t dxj = Real_t(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;\n Real_t dyj = Real_t(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;\n Real_t dzj = Real_t(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;\n\n Real_t dxi = Real_t( 
0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;\n Real_t dyi = Real_t( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;\n Real_t dzi = Real_t( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;\n\n Real_t dxk = Real_t( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;\n Real_t dyk = Real_t( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;\n Real_t dzk = Real_t( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;\n\n /* find delvk and delxk ( i cross j ) */\n\n ax = dyi*dzj - dzi*dyj ;\n ay = dzi*dxj - dxi*dzj ;\n az = dxi*dyj - dyi*dxj ;\n\n domain.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;\n dyv = Real_t(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;\n dzv = Real_t(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;\n\n domain.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;\n\n /* find delxi and delvi ( j cross k ) */\n\n ax = dyj*dzk - dzj*dyk ;\n ay = dzj*dxk - dxj*dzk ;\n az = dxj*dyk - dyj*dxk ;\n\n domain.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;\n dyv = Real_t(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ;\n dzv = Real_t(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;\n\n domain.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;\n\n /* find delxj and delvj ( k cross i ) */\n\n ax = dyk*dzi - dzk*dyi ;\n ay = dzk*dxi - dxk*dzi ;\n az = dxk*dyi - dyk*dxi ;\n\n domain.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;\n dyv = Real_t(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;\n dzv = Real_t(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;\n\n domain.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)", "context_chars": 100, "text": "max_slope();\n Real_t qlc_monoq = domain.qlc_monoq();\n Real_t qqc_monoq = domain.qqc_monoq();\n\nfor ( Index_t ielem = 0 ; ielem < domain.regElemSize(r); ++ielem ) {\n Index_t i = domain.regElemlist(r,ielem);\n Real_t qlin, qquad ;\n Real_t phixi, phieta, phizeta ;\n Int_t bcMask = domain.elemBC(i) ;\n Real_t delvm = 0.0, delvp =0.0;\n\n /* phixi */\n Real_t norm = Real_t(1.) 
/ (domain.delv_xi(i)+ ptiny ) ;\n\n switch (bcMask & XI_M) {\n case XI_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_xi(domain.lxim(i)); break ;\n case XI_M_SYMM: delvm = domain.delv_xi(i) ; break ;\n case XI_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & XI_P) {\n case XI_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_xi(domain.lxip(i)) ; break ;\n case XI_P_SYMM: delvp = domain.delv_xi(i) ; break ;\n case XI_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phixi = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phixi ) phixi = delvm ;\n if ( delvp < phixi ) phixi = delvp ;\n if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;\n if ( phixi > monoq_max_slope) phixi = monoq_max_slope;\n\n\n /* phieta */\n norm = Real_t(1.) / ( domain.delv_eta(i) + ptiny ) ;\n\n switch (bcMask & ETA_M) {\n case ETA_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_eta(domain.letam(i)) ; break ;\n case ETA_M_SYMM: delvm = domain.delv_eta(i) ; break ;\n case ETA_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & ETA_P) {\n case ETA_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_eta(domain.letap(i)) ; break ;\n case ETA_P_SYMM: delvp = domain.delv_eta(i) ; break ;\n case ETA_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phieta = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phieta ) phieta = delvm ;\n if ( delvp < phieta ) phieta = delvp ;\n if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;\n if ( phieta > monoq_max_slope) phieta = monoq_max_slope;\n\n /* phizeta */\n norm = Real_t(1.) 
/ ( domain.delv_zeta(i) + ptiny ) ;\n\n switch (bcMask & ZETA_M) {\n case ZETA_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_zeta(domain.lzetam(i)) ; break ;\n case ZETA_M_SYMM: delvm = domain.delv_zeta(i) ; break ;\n case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & ZETA_P) {\n case ZETA_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_zeta(domain.lzetap(i)) ; break ;\n case ZETA_P_SYMM: delvp = domain.delv_zeta(i) ; break ;\n case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phizeta = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phizeta ) phizeta = delvm ;\n if ( delvp < phizeta ) phizeta = delvp ;\n if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);\n if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;\n\n /* Remove length scale */\n\n if ( domain.vdov(i) > Real_t(0.) ) {\n qlin = Real_t(0.) ;\n qquad = Real_t(0.) ;\n }\n else {\n Real_t delvxxi = domain.delv_xi(i) * domain.delx_xi(i) ;\n Real_t delvxeta = domain.delv_eta(i) * domain.delx_eta(i) ;\n Real_t delvxzeta = domain.delv_zeta(i) * domain.delx_zeta(i) ;\n\n if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;\n if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;\n if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;\n\n Real_t rho = domain.elemMass(i) / (domain.volo(i) * vnew[i]) ;\n\n qlin = -qlc_monoq * rho *\n ( delvxxi * (Real_t(1.) - phixi) +\n delvxeta * (Real_t(1.) - phieta) +\n delvxzeta * (Real_t(1.) - phizeta) ) ;\n\n qquad = qqc_monoq * rho *\n ( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +\n delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +\n delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;\n }\n\n domain.qq(i) = qquad ;\n domain.ql(i) = qlin ;\n } #pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length)", "context_chars": 100, "text": " Real_t pmin,\n Real_t p_cut, Real_t eosvmax,\n Index_t length, Index_t *regElemList)\n{\nfor (Index_t i = 0; i < length ; ++i) {\n Real_t c1s = Real_t(2.0)/Real_t(3.0) ;\n bvc[i] = c1s * (compression[i] + Real_t(1.));\n pbvc[i] = c1s;\n } #pragma omp parallel for firstprivate(length)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)", "context_chars": 100, "text": ")/Real_t(3.0) ;\n bvc[i] = c1s * (compression[i] + Real_t(1.));\n pbvc[i] = c1s;\n }\n\nfor (Index_t i = 0 ; i < length ; ++i){\n Index_t elem = regElemList[i];\n\n p_new[i] = bvc[i] * e_old[i] ;\n\n if (FABS(p_new[i]) < p_cut )\n p_new[i] = Real_t(0.0) ;\n\n if ( vnewc[elem] >= eosvmax ) /* impossible condition here? 
*/\n p_new[i] = Real_t(0.0) ;\n\n if (p_new[i] < pmin)\n p_new[i] = pmin ;\n } #pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, emin)", "context_chars": 100, "text": " Index_t length, Index_t *regElemList)\n{\n Real_t *pHalfStep = Allocate(length) ;\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])\n + Real_t(0.5) * work[i];\n\n if (e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, emin)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0)", "context_chars": 100, "text": "ep, bvc, pbvc, e_new, compHalfStep, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;\n\n if ( delvc[i] > Real_t(0.) ) {\n q_new[i] /* = qq_old[i] = ql_old[i] */ = Real_t(0.) ;\n }\n else {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;\n }\n\n e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]\n * ( Real_t(3.0)*(p_old[i] + q_old[i])\n - Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;\n } #pragma omp parallel for firstprivate(length, rho0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, emin, e_cut)", "context_chars": 100, "text": ".0)*(p_old[i] + q_old[i])\n - Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;\n }\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n\n e_new[i] += Real_t(0.5) * work[i];\n\n if (FABS(e_new[i]) < e_cut) {\n e_new[i] = Real_t(0.) ;\n }\n if ( e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, emin, e_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)", "context_chars": 100, "text": "new, bvc, pbvc, e_new, compression, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i){\n const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;\n Index_t elem = regElemList[i];\n Real_t q_tilde ;\n\n if (delvc[i] > Real_t(0.)) {\n q_tilde = Real_t(0.) ;\n }\n else {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_tilde = (ssc*ql_old[i] + qq_old[i]) ;\n }\n\n e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])\n - Real_t(8.0)*(pHalfStep[i] + q_new[i])\n + (p_new[i] + q_tilde)) * delvc[i]*sixth ;\n\n if (FABS(e_new[i]) < e_cut) {\n e_new[i] = Real_t(0.) 
;\n }\n if ( e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, rho0, emin, e_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0, q_cut)", "context_chars": 100, "text": "new, bvc, pbvc, e_new, compression, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i){\n Index_t elem = regElemList[i];\n\n if ( delvc[i] <= Real_t(0.) ) {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;\n\n if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;\n }\n } #pragma omp parallel for firstprivate(length, rho0, q_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(rho0, ss4o3)", "context_chars": 100, "text": "newc, Real_t *pbvc,\n Real_t *bvc, Real_t ss4o3,\n Index_t len, Index_t *regElemList)\n{\nfor (Index_t i = 0; i < len ; ++i) {\n Index_t elem = regElemList[i];\n Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *\n bvc[i] * pnewc[i]) / rho0;\n if (ssTmp <= Real_t(.1111111e-36)) {\n ssTmp = Real_t(.3333333e-18);\n }\n else {\n ssTmp = SQRT(ssTmp);\n }\n domain.ss(elem) = ssTmp ;\n } #pragma omp parallel for firstprivate(rho0, ss4o3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElemReg)", "context_chars": 100, "text": "in,\n qq_old, ql_old, rho0, eosvmax,\n numElemReg, regElemList);\n }\n\nfor (Index_t i=0; i #pragma omp parallel for firstprivate(numElemReg)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, v_cut)", "context_chars": 100, "text": "rElems(Domain &domain, Real_t *vnew,\n Real_t v_cut, Index_t length)\n{\n if (length != 0) {\nfor(Index_t i=0 ; i #pragma omp parallel for firstprivate(length, v_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/daxpy-loop.cpp", "omp_pragma_line": "#pragma omp parallel for shared(x,y) private(i)", "context_chars": 100, "text": ")\n{\n\tint i;\n\tomp_set_num_threads(number_of_threads);\n\tint n_per_thread = SIZE/number_of_threads;\n\t\n\tfor(i=0;i #pragma omp parallel for shared(x,y) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/matrix-multiplication.cpp", "omp_pragma_line": "#pragma omp parallel for shared(a,b) private(i,j,k)", "context_chars": 100, "text": ")\n\t{\n\t\tfor(j=0; jfor(i=0; i #pragma omp parallel for shared(a,b) private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-thread-safe.cpp", "omp_pragma_line": "#pragma omp parallel for private(x, y, i) reduction(+:count) ", "context_chars": 100, "text": " return pi;\n}\ndouble parallel(int n)\n{\n int i, count=0;\n double x,y,pi;\n\n count = 0;\n for(i=0; i #pragma omp parallel for private(x, y, i) reduction(+:count) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-random-generator.cpp", "omp_pragma_line": "#pragma omp parallel for private(x, y, i) reduction(+:count) ", "context_chars": 100, "text": " double x,y,pi;\n\n count = 0;\n\n // removes synchronization issue - hence reduction clause\n for(i=0; i #pragma omp parallel for private(x, y, i) reduction(+:count) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 4.f / 9.f;\n float w1 = params->density / 9.f;\n float w2 = params->density / 36.f;\n\n for (int jj = 0; jj < params->ny; jj++) {\n for (int ii = 0; ii < params->nx; ii++) {\n // centre\n (*cells_ptr)[(0 * params->ny * params->nx) + (ii + jj*params->nx)] = w0;\n // axis directions\n (*cells_ptr)[(1 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;\n (*cells_ptr)[(2 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;\n (*cells_ptr)[(3 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;\n (*cells_ptr)[(4 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;\n // diagonals\n (*cells_ptr)[(5 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;\n (*cells_ptr)[(6 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;\n (*cells_ptr)[(7 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;\n (*cells_ptr)[(8 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "x) + (ii + jj*params->nx)] = w2;\n }\n }\n\n /* first set all cells in obstacle array to zero */\n for (int jj = 0; jj < params->ny; jj++) {\n for (int ii = 0; ii < params->nx; ii++) {\n (*obstacles_ptr)[ii + jj*params->nx] = 0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/Final Project/gaussian_blur_omp.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "bstr(0, inputfile_name.size() - 4)+ \"_blur_omp.bmp\";\n\n\t\tfor (int j = 0; j < img_height; j++)\n\t\t{\n\t\t\tfor (int i = 0; i < img_width; i++)\n\t\t\t{\n\t\t\t\tpic_out[3 * (j * img_width + i) + MYRED] = gaussian_filter(i, j, MYRED, resolution);\n\t\t\t\tpic_out[3 * (j * img_width + i) + MYGREEN] = gaussian_filter(i, j, MYGREEN, resolution);\n\t\t\t\tpic_out[3 * (j * img_width + i) + MYBLUE] = gaussian_filter(i, j, MYBLUE, resolution);\n\t\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r Class;\n logical verified;\n double zeta_verify_value, epsilon, err;\n\n char *t_names[T_last];\n\n for (i = 0; i < T_last; i++) \n {\n timer_clear(i);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(-:colidx) ", "context_chars": 100, "text": "n, no speed up\n #pragma omp for\n for (j = 0; j < lastrow - firstrow + 1; j++) \n {\n // for (k = rowstr[j]; k < rowstr[j+1]; k+=3) //try loop unrolling\n {\n colidx[k] = colidx[k] - 
firstcol;\n colidx[k + 1] = colidx[k + 1] - firstcol;\n colidx[k + 2] = colidx[k + 2] - firstcol;\n } #pragma omp parallel for reduction(-:colidx) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ze z to obtain x\n //---------------------------------------------------------------------\n // for (j = 0; j < lastcol - firstcol + 1; j++) { \n x[j] = norm_temp2 * z[j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ector to (1, 1, .... 1)\n //---------------------------------------------------------------------\n for (i = 0; i < NA+1; i++) {\n x[i] = 1.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)", "context_chars": 100, "text": "// tried the following two pragma, but seems not speed up with these two, inner for bad idea\n // // #pragma omp parallel for reduction(+:norm_temp1, norm_temp2)\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n norm_temp1 = norm_temp1 + x[j]*z[j];\n norm_temp2 = norm_temp2 + z[j]*z[j];\n } #pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp1, norm_temp2)", "context_chars": 100, "text": " for bad idea\n // #pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)\n // for (j = 0; j < lastcol - firstcol + 1; j++) {\n norm_temp1 = norm_temp1 + x[j]*z[j];\n norm_temp2 = norm_temp2 + z[j]*z[j];\n } #pragma omp parallel for reduction(+:norm_temp1, norm_temp2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for, no speed up, inner for bad idea", "context_chars": 100, "text": "ze z to obtain x\n //---------------------------------------------------------------------\n // for (j = 0; j < lastcol - firstcol + 1; j++) \n {\n x[j] = norm_temp2 * z[j];\n } #pragma omp parallel for, no speed up, inner for bad idea"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "significantly faster\n // on the Cray t3d - overall speed of code is 1.5 times faster.\n\n for (j = 0; j < lastrow - firstrow + 1; j++) {\n sum = 0.0;\n //#pragma omp parallel for reduction(+:sum) // no speed up, inner loop for bad idea(too mych overhead) \n for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n sum = sum + a[k]*p[colidx[k]];\n }\n q[j] = sum;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) ", "context_chars": 100, "text": "neck?)!!!!!************\n for (j = 0; j < lastrow - firstrow + 1; j++) {\n sum = 0.0;\n //for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n sum = sum + 
a[k]*p[colidx[k]];\n } #pragma omp parallel for reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:d)", "context_chars": 100, "text": "in p.q\n //---------------------------------------------------------------------\n d = 0.0;\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n d = d + p[j]*q[j];\n } #pragma omp parallel for reduction(+:d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rho)", "context_chars": 100, "text": "ha*q\n //---------------------------------------------------------------------\n rho = 0.0;\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n z[j] = z[j] + alpha*p[j]; \n r[j] = r[j] - alpha*q[j];\n rho = rho + r[j]*r[j];\n } #pragma omp parallel for reduction(+:rho)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " // p = r + beta*p\n //---------------------------------------------------------------------\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n p[j] = r[j] + beta*p[j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or the use in sparse.\n //---------------------------------------------------------------------\n //for (iouter = 0; iouter < n; iouter++) {\n nzv = NONZER;\n sprnvc(n, nzv, nn1, vc, ivc);\n vecset(n, vc, ivc, &nzv, iouter+1, 0.5);\n arow[iouter] = nzv;\n #pragma omp parallel for\n for (ivelt = 0; ivelt < nzv; ivelt++) {\n acol[iouter][ivelt] = ivc[ivelt] - 1;\n aelt[iouter][ivelt] = vc[ivelt];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nvc(n, nzv, nn1, vc, ivc);\n vecset(n, vc, ivc, &nzv, iouter+1, 0.5);\n arow[iouter] = nzv;\n for (ivelt = 0; ivelt < nzv; ivelt++) {\n acol[iouter][ivelt] = ivc[ivelt] - 1;\n aelt[iouter][ivelt] = vc[ivelt];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f triples in each row\n //---------------------------------------------------------------------\n //for (j = 0; j < nrows+1; j++) {\n rowstr[j] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "--------\n //#pragma omp parallel for\n for (j = 0; j < nrows+1; j++) {\n rowstr[j] = 0;\n }\n\n //for (i = 0; i < n; i++) {\n for (nza = 0; nza < arow[i]; nza++) {\n j = acol[i][nza] + 1;\n rowstr[j] = rowstr[j] + arow[i];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n j 
= acol[i][nza] + 1;\n rowstr[j] = rowstr[j] + arow[i];\n }\n }\n\n rowstr[0] = 0;\n //for (j = 1; j < nrows+1; j++) {\n rowstr[j] = rowstr[j] + rowstr[j-1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".. preload data pages\n //---------------------------------------------------------------------\n //for (j = 0; j < nrows; j++) {\n for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n a[k] = 0.0;\n colidx[k] = -1;\n }\n nzloc[j] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************", "context_chars": 100, "text": "-------------------------------------\n size = 1.0;\n ratio = pow(rcond, (1.0 / (double)(n)));\n\n //for (i = 0; i < n; i++) {\n for (nza = 0; nza < arow[i]; nza++) {\n j = acol[i][nza];\n\n scale = size * aelt[i][nza];\n for (nzrow = 0; nzrow < arow[i]; nzrow++) {\n jcol = acol[i][nzrow];\n va = aelt[i][nzrow] * scale;\n\n //--------------------------------------------------------------------\n // ... add the identity * rcond to the generated matrix to bound\n // the smallest eigenvalue from below by rcond\n //--------------------------------------------------------------------\n if (jcol == j && j == i) {\n va = va + rcond - shift;\n }\n\n cont40 = false;\n for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n if (colidx[k] > jcol) {\n //----------------------------------------------------------------\n // ... insert colidx here orderly\n //----------------------------------------------------------------\n for (kk = rowstr[j+1]-2; kk >= k; kk--) {\n if (colidx[kk] > -1) {\n a[kk+1] = a[kk];\n colidx[kk+1] = colidx[kk];\n }\n }\n colidx[k] = jcol;\n a[k] = 0.0;\n cont40 = true;\n break;\n } else if (colidx[k] == -1) {\n colidx[k] = jcol;\n cont40 = true;\n break;\n } else if (colidx[k] == jcol) {\n //--------------------------------------------------------------\n // ... 
mark the duplicated entry\n //--------------------------------------------------------------\n nzloc[j] = nzloc[j] + 1;\n cont40 = true;\n break;\n }\n }\n if (cont40 == false) {\n printf(\"internal error in sparse: i=%d\\n\", i);\n exit(EXIT_FAILURE);\n }\n a[k] = a[k] + va;\n }\n }\n size = size * ratio;\n } #pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "enerate final results\n //---------------------------------------------------------------------\n //for (j = 1; j < nrows; j++) \n {\n nzloc[j] = nzloc[j] + nzloc[j-1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************", "context_chars": 100, "text": " omp parallel for\n for (j = 1; j < nrows; j++) \n {\n nzloc[j] = nzloc[j] + nzloc[j-1];\n }\n\n //for (j = 0; j < nrows; j++) {\n if (j > 0) {\n j1 = rowstr[j] - nzloc[j-1];\n } else {\n j1 = 0;\n }\n j2 = rowstr[j+1] - nzloc[j];\n nza = rowstr[j];\n for (k = j1; k < j2; k++) {\n a[k] = a[nza];\n colidx[k] = colidx[nza];\n nza = nza + 1;\n }\n } #pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "k < j2; k++) {\n a[k] = a[nza];\n colidx[k] = colidx[nza];\n nza = nza + 1;\n }\n }\n for (j = 1; j < nrows+1; j++) {\n rowstr[j] = rowstr[j] - nzloc[j-1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for,*************** error openmp cannot be used with break statement******************", "context_chars": 100, "text": "----------------------------------------------------------------\n logical was_gen = false;\n //for (ii = 0; ii < nzv; ii++) {\n if (iv[ii] == i) {\n was_gen = true;\n break;\n }\n } #pragma omp parallel for,*************** error openmp cannot be used with break statement******************"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ouble v[], int iv[], int *nzv, int i, double val)\n{\n int k;\n logical set;\n\n set = false;\n {\n for (k = 0; k < *nzv; k++) \n {\n if (iv[k] == i) \n {\n v[k] = val;\n set = true;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/minar09/parallel-computing/OpenMP/gs_openmp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)", "context_chars": 100, "text": "w nor the last row are solved\n\t\t// (that's why both 'i' and 'j' start at 1 and go up to '[nm]-1')\n\t\tfor (int i = 1; i < n-1; i++) {\n\t\t\tfor (int j = 1; j < m-1; j++) {\n\n\t\t\t\tconst int pos = (i * m) + j;\n\t\t\t\tconst float temp = (*mat)[pos];\n\n\t\t\t\t(*mat)[pos] = 
\n\t\t\t\t\t0.25f * (\n\t\t\t\t\t\t(*mat)[pos]\n\t\t\t\t\t\t+ (*mat)[pos - 1]\n\t\t\t\t\t\t+ (*mat)[pos - n]\n\t\t\t\t\t\t+ (*mat)[pos + 1]\n\t\t\t\t\t\t+ (*mat)[pos + n]\n\t\t\t\t\t);\n\n\t\t\t\tdiff += abs((*mat)[pos] - temp);\n\t\t\t}\n\t\t} #pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/minar09/parallel-computing/OpenMP/jacobi_openmp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)", "context_chars": 100, "text": "w nor the last row are solved\n\t\t// (that's why both 'i' and 'j' start at 1 and go up to '[nm]-1')\n\t\tfor (int i = 1; i < n-1; i++) {\n\t\t\tfor (int j = 1; j < m-1; j++) {\n\n\t\t\t\tconst int pos = (i * m) + j;\n\t\t\t\t\n\t\t\t\ttemp[i][j] = \n\t\t\t\t\t0.25f * (\n\t\t\t\t\t\t(*mat)[pos]\n\t\t\t\t\t\t+ (*mat)[pos - 1]\n\t\t\t\t\t\t+ (*mat)[pos - n]\n\t\t\t\t\t\t+ (*mat)[pos + 1]\n\t\t\t\t\t\t+ (*mat)[pos + n]\n\t\t\t\t\t);\n\n\t\t\t\tdiff += abs((*mat)[pos] - temp[i][j]);\n\t\t\t}\n\t\t} #pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dkarageo/Distributed_KNN/source/knn.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " every point in points matrix, find its kNNs in data matrix and\n // store them into results.\n for (int p = 0; p < pointc; p++) {\n\n // Calculate the k nearest neighbors for the current point, by\n // searching on all the available data.\n for (int d = 0; d < matrix_get_rows(data); d++) {\n\n // Calculate the euclidian distance between a queried point and\n // a data point.\n double dist = 0.0;\n for (int i = 0; i < matrix_get_cols(points); i++) {\n dist += pow(matrix_get_cell(points, p, i) -\n matrix_get_cell(data, d, i), 2.0);\n }\n dist = pow(dist, 0.5);\n\n // On first k data, just fill the k positions in results array.\n if (d < k) {\n results[p][d].distance = dist;\n results[p][d].index = i_offset + d;\n\n // When last position in results array gets filled, sort data.\n if (d == k-1) {\n qsort(results[p], k, sizeof(struct KNN_Pair),\n KNN_Pair_asc_comp);\n }\n }\n // Every row in results is initialized and sorted. So if\n // current distance is lesser than the distance of the last nearest\n // neighbor, previous value is replaced by current one.\n else if (dist < results[p][k-1].distance) {\n results[p][k-1].distance = dist;\n // Keep track on the index. 
i_offset is used as the base for\n // all indexes.\n results[p][k-1].index = i_offset + d;\n qsort(results[p], k, sizeof(struct KNN_Pair), KNN_Pair_asc_comp);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/envelope-project/laik/examples/spmv2.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,50)", "context_chars": 100, "text": "dexing, from 0)\n laik_get_map_1d(resD, rangeNo, (void**) &res, &rcount);\n\n#ifdef _OPENMP\nfor(int64_t r = fromRow; r < toRow; r++) {\n res[r - fromRow] = 0.0;\n for(int o = m->row[r]; o < m->row[r+1]; o++)\n res[r - fromRow] += m->val[o] * inp[m->col[o]];\n } #pragma omp parallel for schedule(dynamic,50)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:myresult)", "context_chars": 100, "text": "ons reduction(+:result)\n {\n \n #pragma omp section\n {\n\tdouble myresult = 0;\n for( int jj = 0; jj < N; jj++ )\n\t myresult += heavy_work_0( array[jj] );\n\tresult += myresult;\t \n }\n \n #pragma omp section\n {\n\tdouble myresult = 0;\n #pragma omp parallel for reduction(+:myresult)\n\tfor( int jj = 0; jj < N; jj++ )\n\t myresult += heavy_work_1( array[jj] );\n\tresult += myresult;\t \n } #pragma omp parallel for reduction(+:myresult)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:myresult)", "context_chars": 100, "text": "ult += myresult;\t \n }\n \n #pragma omp section\n {\n\tdouble myresult = 0;\n for( int jj = 0; jj < N; jj++ )\n\t myresult += heavy_work_1( array[jj] );\n\tresult += myresult;\t \n }\n \n #pragma omp section\n {\n\tdouble myresult = 0;\n #pragma omp parallel for reduction(+:myresult)\n\tfor( int jj = 0; jj < N; jj++ )\n\t myresult += heavy_work_2( array[jj] );\n\tresult += myresult;\t \n } #pragma omp parallel for reduction(+:myresult)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:myresult)", "context_chars": 100, "text": "ult += myresult;\t \n }\n \n #pragma omp section\n {\n\tdouble myresult = 0;\n for( int jj = 0; jj < N; jj++ )\n\t myresult += heavy_work_2( array[jj] );\n\tresult += myresult;\t \n }\n \n }\n \n }\n \n \n \n double tend = CPU_TIME;\n\n\n /* -----------------------------------------------------------------------------\n * finalize\n * -----------------------------------------------------------------------------\n */\n\n free(array);\n \n printf(\"The result is %g\\nrun took %g of wall-clock time\\n\\n\",\n\t result, tend - tstart );\n\n\nreturn 0;\n}\n\n\n\ndouble heavy_work_0( uint N )\n{\n double guess = 3.141572 / 3;\n \n for( int i = 0; i < N; i++ )\n {\n guess = exp( guess );\n guess = sin( guess );\n\n }\n\n return guess;\n} #pragma omp parallel for reduction(+:myresult)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/threads_affinity/06_touch_by_all.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "as\n // the parallel for has the\n // scheduling as the final one\n\n double _tstart = CPU_TIME_th;\n for ( int ii = 0; ii 
< N; ii++ )\n array[ii] = (double)ii;\n double _tend = CPU_TIME_th;\n\n printf(\"init takes %g\\n\", _tend - _tstart);\n /* -----------------------------------------------------------------------------\n * calculate\n * -----------------------------------------------------------------------------\n */\n\n\n double S = 0; // this will store the summation\n double th_avg_time = 0; // this will be the average thread runtime\n double th_min_time = 1e11; // this will be the min thread runtime.\n\t\t\t\t\t\t\t // contrasting the average and the min\n\t\t\t\t\t\t\t // time taken by the threads, you may\n\t\t\t\t\t\t\t // have an idea of the unbalance.\n \n double tstart = CPU_TIME;\n\n #if !defined(_OPENMP)\n \n for ( int ii = 0; ii < N; ii++ ) // well, you may notice this implementation\n S += array[ii]; // is particularly inefficient anyway\n\n #else\n\n #pragma omp parallel reduction(+:th_avg_time)\t\t\t\t\\\n reduction(min:th_min_time) // in this region there are 2 different\n { // reductions: the one of runtime, which\n struct timespec myts; // happens in the whole parallel region;\n double mystart = CPU_TIME_th; // and the one on S, which takes place \n #pragma omp for reduction(+:S) // in the for loop. \n for ( int ii = 0; ii < N; ii++ )\n S += array[ii];\n\n th_avg_time = CPU_TIME_th - mystart; \n th_min_time = CPU_TIME_th - mystart; \n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "Number of Threads requested = %i\\n\",k);\n }\n#endif\n\n /* Get initial value for system clock. */\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"Your clock granularity appears to be \"\n\t \"less than one microsecond.\\n\");\n\n t = mysecond();\nfor (j = 0; j < N; j++)\n\ta[j] = 2.0E0 * a[j];\n t = 1.0E6 * (mysecond() - t);\n\n printf(\"Each test below will take on the order\"\n\t\" of %d microseconds.\\n\", (int) t );\n printf(\" (= %d clock ticks)\\n\", (int) (t/quantum) );\n printf(\"Increase the size of the arrays if this shows that\\n\");\n printf(\"you are not getting at least 20 clock ticks per test.\\n\");\n\n printf(HLINE);\n\n printf(\"WARNING -- The above is only a rough guideline.\\n\");\n printf(\"For best results, please be sure you know the\\n\");\n printf(\"precision of your system timer.\\n\");\n printf(HLINE);\n \n /*\t--- MAIN LOOP --- repeat test cases NTIMES times --- */\n\n scalar = 3.0;\n for (k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f \\n\",csum);\n\t}\n\telse {\n\t\tprintf (\"Solution Validates\\n\");\n\t}\n}\n\nvoid tuned_STREAM_Copy()\n{\n\tint j;\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "omp parallel for\n\tfor (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "mber of Threads requested = %i\\n\",k);\n }\n#endif\n\n /* Get initial value for system clock. */\n//for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"Your clock granularity appears to be \"\n\t \"less than one microsecond.\\n\");\n\n t = mysecond();\nfor (j = 0; j < N; j++)\n\ta[j] = 2.0E0 * a[j];\n t = 1.0E6 * (mysecond() - t);\n\n printf(\"Each test below will take on the order\"\n\t\" of %d microseconds.\\n\", (int) t );\n printf(\" (= %d clock ticks)\\n\", (int) (t/quantum) );\n printf(\"Increase the size of the arrays if this shows that\\n\");\n printf(\"you are not getting at least 20 clock ticks per test.\\n\");\n\n printf(HLINE);\n\n printf(\"WARNING -- The above is only a rough guideline.\\n\");\n printf(\"For best results, please be sure you know the\\n\");\n printf(\"precision of your system timer.\\n\");\n printf(HLINE);\n \n /*\t--- MAIN LOOP --- repeat test cases NTIMES times --- */\n\n scalar = 3.0;\n for (k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (k=0; kfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[0][k];\n\t\n\ttimes[1][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Scale(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "second() - times[1][k];\n\t\n\ttimes[2][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Add();\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[2][k];\n\t\n\ttimes[3][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Triad(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f \\n\",csum);\n\t}\n\telse {\n\t\tprintf (\"Solution Validates\\n\");\n\t}\n}\n\nvoid tuned_STREAM_Copy()\n{\n\tint j;\nfor (j=0; j #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "omp parallel for\n\tfor (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/berkerdemirel/Parallel-Breadth-First-Search-OpenMP-and-CUDA/CPU/hybrid.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:nf) reduction(||:improvement) schedule(guided, 32)", "context_chars": 100, "text": " *distance, int &level, int nov, int *unvisited, int uvSize) {\n\tbool improvement = false;\n\tnf = 0;\n\nfor (int i = 0; i < uvSize; i++) {\n\t\tint v = unvisited[i];\n\t\tif (distance[v] < 0) {\n\t\t\tfor (int j = row_inv[v]; j < row_inv[v + 1]; j++) {\n\t\t\t\tint u = col_inv[j];\n\t\t\t\tif (distance[u] == level) {\n\t\t\t\t\tdistance[v] = level + 1;\n\t\t\t\t\tnf++;\n\t\t\t\t\timprovement = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for reduction(+:nf) reduction(||:improvement) schedule(guided, 32)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ck(unsigned long *A, unsigned long *B, const int lda, const int ldb, const int block_size) {\n// // for(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ned long *B, const int n, const int m, const int lda, const int ldb, const int block_size) {\n// // for(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n// }\n\nvoid transpose(unsigned long *src, unsigned long *dst, const int N, const int M) {\n for(int n = 0; n #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for all consecutive parallel regions\n }\n\n // #pragma omp parallel\n // #pragma omp for\n for (int i = 0; i < n; ++i)\n {\n rows[i*m] = x[i*m];\n for (int j = 1; j < m; ++j)\n {\n rows[i*m + j] = x[i*m + j] + rows[i*m + j - 1];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "} else {\n // out[i*m + j] = x[i*m + j];\n // } \n // }\n // }\n\n for (int i = 0; i < m; ++i)\n {\n rows[i*n] = out[i*n];\n for (int j = 1; j < n; ++j)\n {\n rows[i*n + 
j] = out[i*n + j] + rows[i*n + j - 1];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_kernels/vertex_cover.c", "omp_pragma_line": "#pragma omp parallel for private(u,v,j,k)", "context_chars": 100, "text": " memblock1 + 2*G->n;\n position_e = memblock1 + 2*(G->n + G->m);\n n = G->n;\n#ifdef _OPENMP\n for(i=0; idbl_weight_v[i];\n degree_v[i] = G->numEdges[i+1] - G->numEdges[i];\n visited_v[i] = 0;\n if(degree_v[i] == 0)\n visited_v[i]=1;\n for(j=G->numEdges[i]; jnumEdges[i+1]; j++)\n {\n u = i;\n v = G->endV[j];\n delta_e[j] = 0;\n visited_e[j] = 0;\n if(v < u )\n continue; /* we have already covered this case \n when we visited v. */\n for (k=G->numEdges[v]; knumEdges[v+1]; k++)\n {\n if(G->endV[k] == u)\n break;\n }\n position_e[j] = k;\n position_e[k] = j;\n }\n } #pragma omp parallel for private(u,v,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_kernels/vertex_cover.c", "omp_pragma_line": "#pragma omp parallel for shared(max,max_e,max_u,max_v) private(j,u,v)", "context_chars": 100, "text": " edge_counter = 2*G->m;\n while(edge_counter > 0)\n {\n max = 0;\n#ifdef _OPENMP\n for(i=0; inumEdges[i]; jnumEdges[i+1]; j++)\n {\n u = i;\n v = G->endV[j];\n if(degree_v[u] + degree_v[v] > max)\n {\n max = degree_v[u]+ degree_v[v];\n max_e = j;\n max_u = u;\n max_v = v;\n }\n }\n } #pragma omp parallel for shared(max,max_e,max_u,max_v) private(j,u,v)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_partitioning/modularity_spectral.c", "omp_pragma_line": "#pragma omp parallel for shared(communitySize) ", "context_chars": 100, "text": "ices belong to this community \n and updating the vertex Vector accordingly. 
*/\n /* reduction(+:degreeSum) */\n for(i=0; in; i++)\n {\n if(v2C[i] == curCommunity)\n {\n {\n communitySize++;\n vertex[communitySize] = i;\n }\n v2pos[i] = communitySize;\n degreeSum += G->numEdges[i+1]-G->numEdges[i];\n }\n } #pragma omp parallel for shared(communitySize) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtLambda2Expression.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", const InputType * gradY, const InputType * gradZ,\n double *lambda2, const int numTuples)\n{\n for (int i = 0; i < numTuples; ++i)\n {\n const int offset = 3*i;\n const double du[3] = { gradX[offset], gradX[offset+1], gradX[offset+2] };\n const double dv[3] = { gradY[offset], gradY[offset+1], gradY[offset+2] };\n const double dw[3] = { gradZ[offset], gradZ[offset+1], gradZ[offset+2] };\n\n COMPUTE_LAMBDA2\n lambda2[i] = lambda[1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtQCriterionExpression.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dX, const InputType * gradY, const InputType * gradZ,\n double *qCrit, const int numTuples)\n{\n for (int i = 0; i < numTuples; ++i)\n {\n const int offset = 3*i;\n const InputType du[3] = { gradX[offset], gradX[offset+1], gradX[offset+2] };\n const InputType dv[3] = { gradY[offset], gradY[offset+1], gradY[offset+2] };\n const InputType dw[3] = { gradZ[offset], gradZ[offset+1], gradZ[offset+2] };\n\n COMPUTE_Q_CRIT(InputType, qCrit[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtMagnitudeExpression.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d\nompCalculateMagnitude(const ArrayType *vectorIn, ArrayType *scalarOut, const int numTuples)\n{\n for (vtkIdType i = 0; i < numTuples ; ++i)\n {\n const vtkIdType idx = 3*i;\n scalarOut[i] = sqrt((double)vectorIn[idx+0]*(double)vectorIn[idx+0]+\n (double)vectorIn[idx+1]*(double)vectorIn[idx+1]+\n (double)vectorIn[idx+2]*(double)vectorIn[idx+2]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtGradientExpression.C", "omp_pragma_line": "#pragma omp parallel for private(i,j,k) shared(in,out)", "context_chars": 100, "text": " }\n }\n else\n {\n#ifdef _OPENMP\n#pragma message(\"Compiling for OpenMP.\")\n#endif\n for (k = 0 ; k < dims2 ; k++)\n {\n for (j = 0 ; j < dims1 ; j++)\n {\n for (i = 0 ; i < dims0 ; i++)\n {\n int index = k*kskip + j*jskip + i*iskip;\n int vec_index = 3*index;\n\n float *pt1 = pts + 3*(index+iskip);\n float *pt2 = pts + 3*(index-iskip);\n if ((i > 0) && (i < (dims0-1)))\n out[vec_index] = in[index+iskip]-in[index-iskip];\n else if (i == 0)\n {\n pt2 = pts + 3*index;\n out[vec_index] = in[index+iskip] - in[index];\n }\n else // i == dims0-1\n {\n pt1 = pts + 3*index;\n out[vec_index] = in[index] - in[index-iskip];\n }\n float diff[3];\n diff[0] = pt1[0] - pt2[0];\n diff[1] = pt1[1] - pt2[1];\n diff[2] = pt1[2] - pt2[2];\n float dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]);\n if (dist == 0.)\n out[vec_index++] = 0.;\n else\n out[vec_index++] /= dist;\n\n pt1 = pts + 3*(index+jskip);\n pt2 = pts + 3*(index-jskip);\n if ((j > 0) && (j < (dims1-1)))\n out[vec_index] = in[index+jskip] - 
in[index-jskip];\n else if (j == 0)\n {\n pt2 = pts + 3*index;\n out[vec_index] = in[index+jskip] - in[index];\n }\n else // j == dims1-1\n {\n pt1 = pts + 3*index;\n out[vec_index] = in[index] - in[index-jskip];\n }\n diff[0] = pt1[0] - pt2[0];\n diff[1] = pt1[1] - pt2[1];\n diff[2] = pt1[2] - pt2[2];\n dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]);\n if (dist == 0.)\n out[vec_index++] = 0.;\n else\n out[vec_index++] /= dist;\n\n pt1 = pts + 3*(index+kskip);\n pt2 = pts + 3*(index-kskip);\n if ((k > 0) && (k < (dims2-1)))\n out[vec_index] = in[index+kskip] - in[index-kskip];\n else if (k == 0)\n {\n pt2 = pts + 3*index;\n out[vec_index] = in[index+kskip] - in[index];\n }\n else // k == dims2-1\n {\n pt1 = pts + 3*index;\n out[vec_index] = in[index] - in[index-kskip];\n }\n diff[0] = pt1[0] - pt2[0];\n diff[1] = pt1[1] - pt2[1];\n diff[2] = pt1[2] - pt2[2];\n dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]);\n if (dist == 0.)\n out[vec_index++] = 0.;\n else\n out[vec_index++] /= dist;\n }\n }\n } #pragma omp parallel for private(i,j,k) shared(in,out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "float *A, int lda,\n float *B, int ldb,\n float *C, int ldc)\n{\n int i,j,k;\nfor(i = 0; i < M; ++i){\n for(k = 0; k < K; ++k){\n register float A_PART = ALPHA*A[i*lda+k];\n for(j = 0; j < N; ++j){\n C[i*ldc+j] += A_PART*B[k*ldb+j];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "float *A, int lda,\n float *B, int ldb,\n float *C, int ldc)\n{\n int i,j,k;\nfor(i = 0; i < M; ++i){\n for(j = 0; j < N; ++j){\n register float sum = 0;\n for(k = 0; k < K; ++k){\n sum += ALPHA*A[i*lda+k]*B[j*ldb + k];\n }\n C[i*ldc+j] += sum;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "float *A, int lda,\n float *B, int ldb,\n float *C, int ldc)\n{\n int i,j,k;\nfor(i = 0; i < M; ++i){\n for(k = 0; k < K; ++k){\n register float A_PART = ALPHA*A[k*lda+i];\n for(j = 0; j < N; ++j){\n C[i*ldc+j] += A_PART*B[k*ldb+j];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "float *A, int lda,\n float *B, int ldb,\n float *C, int ldc)\n{\n int i,j,k;\nfor(i = 0; i < M; ++i){\n for(j = 0; j < N; ++j){\n register float sum = 0;\n for(k = 0; k < K; ++k){\n sum += ALPHA*A[i+k*lda]*B[k+j*ldb];\n }\n C[i*ldc+j] += sum;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/assorted/selection/2024_selection_project/bitonic_sort/src/bitonic_omp.cpp", "omp_pragma_line": "#pragma omp parallel for private(increasing)", "context_chars": 100, "text": " itr = (size_t)log2((double)size);\n bool increasing;\n\n for (size_t i = 0; i < itr; ++i)\n {\nfor (size_t j = 0; j < size; j += groupSize)\n {\n increasing = ((j / groupSize) % 2 == 0);\n bitonicMerge(numbers, left + j, groupSize, increasing);\n } #pragma omp parallel for private(increasing)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/07_optimisation/optim_sols/openmp_optim.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n\n int i = 0;\n for (int step=0; step < n; ++step){\n // update the new array\n for (int y = 0; y < h; ++y) {\n for (int x = 0; x < w; ++x) {\n int N = neighbours(x, y, w, h, cells);\n int idx = y * w + x;\n bool is_on = cells[idx];\n bool new_val = 0;\n if (is_on){\n if (N < A || N > B) new_val = 0;\n else new_val = 1;\n }else{\n if (N == C) new_val = 1;\n }\n buffer[idx] = new_val;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/01_omp/04_array_sum.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:global_total)", "context_chars": 100, "text": " 0;\n// the reduction syntax is something like (operation:variable), and openmp handles most things.\nfor (int i=0; i < N; ++i){\n global_total += v[i];\n } #pragma omp parallel for reduction(+:global_total)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/02_omp_vectorised/02_pi.cpp", "omp_pragma_line": "#pragma omp parallel for private(x) reduction(+ \\", "context_chars": 100, "text": "counter\n int i;\n // how many loop iterations should we do\n int n = ceil((1.0 - 0.0) / dx);\n: ans)\n for (i = 0; i < n; ++i) {\n // get x\n x = i * dx;\n // increment ans\n ans += dx * 4.0 / (1.0 + x * x);\n } #pragma omp parallel for private(x) reduction(+ \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/02_omp_vectorised/02_pi.cpp", "omp_pragma_line": "#pragma omp parallel for simd reduction(+ \\", "context_chars": 100, "text": "ans = 0;\n double x;\n int i;\n int n = ceil(1.0 / dx);\n// only change -> add in parallel for\n: ans)\n // loop is the same\n for (i = 0; i < n; ++i) {\n x = i * dx;\n ans += dx * 4.0 / (1.0 + x * x);\n } #pragma omp parallel for simd reduction(+ \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/balos1/Shi_Tomasi_Feature_Detection/openmp/stfd.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "e_width, int windowsize, data_wrapper_t *eigenvalues)\n{\n\tint w = floor(windowsize/2);\n \tint i, j;\n\n\tfor (i = 0; i < image_height; i++) {\n\t\tfor (j = 0; j < image_width; j++) {\n\t\t\tfloat ixx_sum = 0;\n\t\t\tfloat iyy_sum = 0;\n\t\t\tfloat ixiy_sum = 0;\n\n\t\t\tfor (int k = 0; k < windowsize; k++) {\n\t\t\t\tfor (int m = 0; m < windowsize; m++) {\n\t\t\t\t\tint offseti = -1 * w + k;\n\t\t\t\t\tint offsetj = -1 * w + m;\n\t\t\t\t\tif (i+offseti >= 0 && i+offseti < image_height && j + offsetj >= 0 && j+offsetj < image_width){\n\t\t\t\t\t\tixx_sum += hgrad[(i +offseti) * image_width + (j + offsetj)] * hgrad[(i +offseti) * image_width + (j + offsetj)];\n\t\t\t\t\t\tiyy_sum += vgrad[(i +offseti) * image_width + (j + offsetj)] * vgrad[(i +offseti) * image_width + (j + offsetj)];\n\t\t\t\t\t\tixiy_sum += hgrad[(i +offseti) * image_width + (j + offsetj)] * vgrad[(i +offseti) * image_width + (j + offsetj)];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\teigenvalues[i*image_width+j].x = i;\n\t\t\teigenvalues[i*image_width+j].y = j;\n\t\t\teigenvalues[i*image_width+j].data = min_eigenvalue(ixx_sum, ixiy_sum, ixiy_sum, 
iyy_sum);\n\t\t}\n\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/balos1/Shi_Tomasi_Feature_Detection/openmp/stfd.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "e, int image_width, int image_height, int kernel_width, int kernel_height, int half)\n{\n\tint i, j;\n\n\tfor (i = 0; i < image_height; i++) {\n\t\tfor (j = 0; j < image_width; j++) {\n\t\t\t// reset accumulator when \"focused\" pixel changes\n\t\t\tfloat sum = 0.0;\n\t\t\t// for each item in the kernel\n\t\t\tfor (int k = 0; k < kernel_height; k++) {\n\t\t\t\tfor (int m = 0; m < kernel_width; m++) {\n\t\t\t\t\tint offseti = -1 * (kernel_height/2) + k;\n\t\t\t\t\tint offsetj = -1 * (kernel_width/2) + m;\n\t\t\t\t\t// Check to make sure we are in the bounds of the image.\n\t\t\t\t\tif (i+offseti >= 0 && i+offseti < image_height && j + offsetj >= 0 && j+offsetj < image_width)\n\t\t\t\t\t\tsum+=(float)(image[(i+offseti) * image_width + (j+offsetj)])*kernel[k*kernel_width +m];\n\t\t\t\t}\n\t\t\t}\n\t\t\tresultimage[i * image_width + j] = sum;\n\t\t}\n\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) private(diff)", "context_chars": 100, "text": "eads();//NB;\n\tbx = sizex/nbx + ((sizex%nbx) ? 1 : 0);//sizex/nbx;\n\tnby = 1;//NB;\n\tby = sizey/nby;\n\n\tfor (int ii=0; ii #pragma omp parallel for reduction(+:sum) private(diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) private(diff, lsw)", "context_chars": 100, "text": " = sizex/nbx + ((sizex%nbx) ? 1 : 0);\n\tnby = 1;\n\tby = sizey/nby;\n// */\n\n\t// Computing \"Red\" blocks\n\tfor (int ii=0; ii #pragma omp parallel for reduction(+:sum) private(diff, lsw)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) private(diff, lsw)", "context_chars": 100, "text": "\t\tdiff = unew - u[i*sizey+ j];\n\t\t\t\t\tsum += diff * diff;\n\t\t\t\t\tu[i*sizey+j]=unew;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Computing \"Black\" blocks\n\tfor (int ii=0; ii #pragma omp parallel for reduction(+:sum) private(diff, lsw)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) private(diff)", "context_chars": 100, "text": " omp_get_max_threads();\n\tbx = sizex/nbx + ((sizex%nbx) ? 
1 : 0);\n\tnby = 1;\n\tby = sizey/nby;\n// */\n\n\tfor (int ii=0; ii #pragma omp parallel for reduction(+:sum) private(diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: n)", "context_chars": 100, "text": "706\n// { dg-do run }\n// { dg-options \"-std=c++11\" }\n\ntemplate \nT\nfoo ()\n{\n T n = T ();\nfor (T i = [](){ return 3; } #pragma omp parallel for reduction (+: n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: n)", "context_chars": 100, "text": " return 3; }(); i < 10; ++i)\n n++;\n return n;\n}\n\ntemplate \nT\nbar ()\n{\n T n = T ();\nfor (T i = [](){ return 1; } #pragma omp parallel for reduction (+: n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: n)", "context_chars": 100, "text": " return 4; }(); i < 10; ++i)\n n++;\n return n;\n}\n\ntemplate \nT\nbaz ()\n{\n T n = T ();\nfor (T i = T (); i < [](){ return 7; } #pragma omp parallel for reduction (+: n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/simd14.C", "omp_pragma_line": "#pragma omp parallel for simd linear(r) linear(s:17ULL) linear(t:2)", "context_chars": 100, "text": "e__((noinline, noclone)) int\nfoo (unsigned long long &s, short *&t)\n{\n int i, j = 0;\n int &r = j;\nfor (i = 0; i < 1024; i++)\n bar (r, s, t);\n return j;\n}\n\nint\nmain ()\n{\n int i;\n for (i = 0; i < 2048; i++)\n b[i] = 3 * i;\n unsigned long long s = 12;\n short *t = b;\n int j = foo (s, t);\n for (i = 0; i < 1024; i++)\n if (a[i] != 12 + 24 * i)\n __builtin_abort ();\n if (j != 1024 || s != 12 + 1024 * 17ULL || t != &b[2048])\n __builtin_abort ();\n} #pragma omp parallel for simd linear(r) linear(s:17ULL) linear(t:2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C", "omp_pragma_line": "#pragma omp parallel for private (a) reduction(|:R::r)", "context_chars": 100, "text": "a(e) {} Q a; int &b; void m1 (); };\nint f[64];\n\ntemplate \nvoid\nA::m1 ()\n{\n r = 0;\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd linear (R::r)\n for (R::r = 0; r < 32; R::r++)\n f[r + 8] |= 1;\n for (int i = 0; i < 64; i++)\n if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n #pragma omp parallel for lastprivate (T::t)\n for (T::t = 0; T::t < 32; T::t += 3)\n f[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (T::t)\n for (T::t = 0; T::t < 32; T::t++)\n f[T::t + 9] |= 4;\n if (T::t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 
4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n f[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (a = 0; A::a < 8; a++)\n f[((T::t << 2) | a) + 3] |= 16;\n if (T::t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n f[((T::t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n f[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for private (a) reduction(|:R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C", "omp_pragma_line": "#pragma omp parallel for simd linear (R::r)", "context_chars": 100, "text": "r (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n for (R::r = 0; r < 32; R::r++)\n f[r + 8] |= 1;\n for (int i = 0; i < 64; i++)\n if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n #pragma omp parallel for lastprivate (T::t)\n for (T::t = 0; T::t < 32; T::t += 3)\n f[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (T::t)\n for (T::t = 0; T::t < 32; T::t++)\n f[T::t + 9] |= 4;\n if (T::t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n f[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 
2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (a = 0; A::a < 8; a++)\n f[((T::t << 2) | a) + 3] |= 16;\n if (T::t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n f[((T::t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n f[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for simd linear (R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (T::t)", "context_chars": 100, "text": "t i = 0; i < 64; i++)\n if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n for (T::t = 0; T::t < 32; T::t += 3)\n f[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (T::t)\n for (T::t = 0; T::t < 32; T::t++)\n f[T::t + 9] |= 4;\n if (T::t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n f[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (a = 0; A::a < 8; a++)\n f[((T::t << 2) | a) + 3] |= 16;\n if (T::t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n f[((T::t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n f[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for lastprivate (T::t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C", "omp_pragma_line": "#pragma omp parallel for reduction(|:r)", "context_chars": 100, "text": " 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n f[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (a = 0; A::a < 8; a++)\n f[((T::t << 2) | a) + 3] |= 16;\n if (T::t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n f[((T::t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n f[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
(16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for reduction(|:r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "r (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n for (R::r = 0; r < 32; R::r += 2)\n f[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (a = 0; A::a < 8; a++)\n f[((T::t << 2) | a) + 3] |= 16;\n if (T::t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (T::t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n f[((T::t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n f[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
(16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-6.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": "*y, B (&w)[1][2])\n{\n A a[9];\n short bb[5] = {};\n short (&b)[5] = bb;\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:2]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-6.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": "), w(w3), z(), a(), b() {}\n __attribute__((noinline, noclone)) void foo ();\n};\n\nvoid\nS::foo ()\n{\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:2]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-4.C", "omp_pragma_line": "#pragma omp parallel for reduction (min:f) reduction (max:j)", "context_chars": 100, "text": "(void);\n\ntemplate \nvoid\nfoo ()\n{\n I j = -10000;\n F f = 1024.0;\n int i;\n for (i = 0; i < 4; i++)\n switch (i)\n\t{\n\tcase 0:\n\t if (j < -16) j = -16; break;\n\tcase 1:\n\t if (f > -2.0) f = -2.0; break;\n\tcase 2:\n\t if (j < 8) j = 8; if (f > 9.0) f = 9.0; break;\n\tcase 3:\n\t break;\n\t} #pragma omp parallel for reduction (min:f) reduction (max:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-4.C", "omp_pragma_line": "#pragma omp parallel for reduction (min:f) reduction (max:j)", "context_chars": 100, "text": " != 8 || f != -2.0)\n abort ();\n}\n\nint\nmain ()\n{\n int j = -10000;\n float f = 1024.0;\n int i;\n for (i = 0; i < 4; i++)\n switch (i)\n\t{\n\tcase 0:\n\t if (j < -16) j = -16; break;\n\tcase 1:\n\t if (f > -2.0) f = -2.0; break;\n\tcase 2:\n\t if (j < 8) j = 8; if (f > 9.0) f = 9.0; break;\n\tcase 3:\n\t break;\n\t} #pragma omp parallel for reduction (min:f) reduction (max:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C", 
"omp_pragma_line": "#pragma omp parallel for firstprivate (f) reduction (+:n, m) \\", "context_chars": 100, "text": " 72 || l != 1)\n abort ();\n}\n\nvoid\nf2 (J x, J y, J z)\n{\n int f = 0, n = 0, m = 0;\nnum_threads (8) schedule (static, 9) \\\n\t\t\t collapse (6 - 2)\n for (I i = x.end () - 1; i >= x.begin (); --i)\n for (int l = -131; l >= -131; l--)\n for (I j = y.end (); j > y.begin () - 1; j -= 1)\n\t{\n\t for (I k = z.end () - 4; k >= z.begin () + 3; k--)\n\t if (omp_get_num_threads () == 8\n\t\t&& ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)\n\t\t != (omp_get_thread_num () * 9 + f++)))\n\t n++;\n\t else\n\t m++;\n\t} #pragma omp parallel for firstprivate (f) reduction (+:n, m) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (f) reduction (+:n, m) \\", "context_chars": 100, "text": "rt ();\n}\n\ntemplate \nvoid\nf4 (J x, J y, J z)\n{\n int f = 0, n = 0, m = 0;\nnum_threads (8) schedule (static, 9) \\\n\t\t\t collapse (5 - 2)\n for (I i = x.end () - 1; i >= x.begin (); --i)\n {\n for (I j = y.end (); j > y.begin () - 1; j -= 1)\n\t{\n\t for (I k = z.end () - 4; k >= z.begin () + 3; k--)\n\t if (omp_get_num_threads () == 8\n\t\t&& ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)\n\t\t != (omp_get_thread_num () * 9 + f++)))\n\t n++;\n\t else\n\t m++;\n\t}\n } #pragma omp parallel for firstprivate (f) reduction (+:n, m) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (f) reduction (+:n, m) \\", "context_chars": 100, "text": "rt ();\n}\n\ntemplate \nvoid\nf6 (J x, J y, J z)\n{\n int f = 0, n = 0, m = 0;\nnum_threads (8) schedule (static, 9) \\\n\t\t\t collapse (5 - 2)\n for (I i = x.end () - 1; i >= x.begin (); --i)\n {\n for (I j = y.end (); j > y.begin () - 1; j -= 1)\n\t{\n\t for (I k = z.end () - 4; k >= z.begin () + (T) 3; k--)\n\t if (omp_get_num_threads () == 8\n\t\t&& ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)\n\t\t != (omp_get_thread_num () * 9 + f++)))\n\t n++;\n\t else\n\t m++;\n\t}\n } #pragma omp parallel for firstprivate (f) reduction (+:n, m) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (f) reduction (+:n, m) \\", "context_chars": 100, "text": "\n abort ();\n}\n\ntemplate \nvoid\nf8 (J x, J y, J z)\n{\n T f = 0, n = 0, m = 0;\nnum_threads (8) schedule (static, 9) \\\n\t\t\t collapse (6 - 2)\n for (I i = x.end () - 1; i >= x.begin (); --i)\n for (T l = 0; l < 1; l++)\n for (I j = y.end (); j > y.begin () - 1; j -= 1)\n\t{\n\t for (I k = z.end () - 4; k >= z.begin () + 3; k--)\n\t if (omp_get_num_threads () == 8\n\t\t&& ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)\n\t\t != (omp_get_thread_num () * 9 + f++)))\n\t n++;\n\t else\n\t m++;\n\t} #pragma omp parallel for firstprivate (f) reduction (+:n, m) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (f) reduction (+:n, m) \\", "context_chars": 100, "text": ";\n}\n\ntemplate \nvoid\nf10 (J x, J y, J z)\n{\n T f = 0, n = 0, m = 0;\nnum_threads (8) schedule (static, 9) \\\n\t\t\t collapse (6 - 2)\n for (S i = x.end () - 1; i >= x.begin (); --i)\n for (T l = 0; 
l < 1; l++)\n for (S j = y.end (); j > y.begin () - 1; j -= 1)\n\t{\n\t for (S k = z.end () - 4; k >= z.begin () + 3; k--)\n\t if (omp_get_num_threads () == 8\n\t\t&& ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)\n\t\t != (omp_get_thread_num () * 9 + f++)))\n\t n++;\n\t else\n\t m++;\n\t} #pragma omp parallel for firstprivate (f) reduction (+:n, m) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d\nbaz (int i)\n{\n if (i < 0 || i >= 2000)\n abort ();\n results[i]++;\n}\n\nvoid\nf1 (int x, int y)\n{\nfor (int i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (int x, int y)\n{\n int i;\n#pragma omp parallel for private(i)\n for (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "mp parallel for\n for (int i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (int x, int y)\n{\n int i;\nfor (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (int x, int y)\n{\n#pragma omp parallel for\n for (int i = x; i <= y; i = i + 9 - 8)\n baz (i);\n} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (int x, int y)\n{\nfor (int i = x; i <= y; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (int x, int y)\n{\n int i;\n#pragma omp parallel for lastprivate(i)\n for (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate(i)", "context_chars": 100, "text": "= x; i <= y; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (int x, int y)\n{\n int i;\nfor (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (int x, int y)\n{\n#pragma omp parallel for\n for (int i = x + 2000 - 64; i > y + 10L; i -= 10L)\n baz (i);\n} #pragma omp parallel for lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " lastprivate(i)\n for (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (int x, int y)\n{\nfor (int i = x + 2000 - 64; i > y + 10L; i -= 10L)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (int x, int y)\n{\n#pragma omp parallel for\n for (int i = x + 2000 - 64; i > y + 10L; i = i - 12 + 2L)\n baz (i + N);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i = x + 2000 - 64; i > y + 10L; i -= 10L)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (int x, int y)\n{\nfor (int i = x + 2000 - 64; i > y + 10L; i = i - 12 + 2L)\n baz (i + N);\n}\n\ntemplate \nvoid\nf7 (int i, int x, int y)\n{\n#pragma omp parallel for\n for (i = x - 10; i <= y + 10; i += N)\n baz (i);\n} #pragma omp parallel for"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i > y + 10L; i = i - 12 + 2L)\n baz (i + N);\n}\n\ntemplate \nvoid\nf7 (int i, int x, int y)\n{\nfor (i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n int i;\n#pragma omp parallel for\n for (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n int i;\nfor (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (T x, T y)\n{\n#pragma omp parallel for\n for (T i = x; i <= y; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n (); i <= j.end () + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (T x, T y)\n{\nfor (T i = x; i <= y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (T x, T y)\n{\n T i;\n#pragma omp parallel for\n for (i = x; i > y; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = x; i <= y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (T x, T y)\n{\n T i;\nfor (i = x; i > y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf11 (T x, long y)\n{\n#pragma omp parallel\n {\n#pragma omp for nowait\n for (T i = x; i <= y; i += 3L)\n baz (i);\n#pragma omp single\n baz (y + 3);\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (i);\n#pragma omp single\n baz (y + 3);\n }\n}\n\ntemplate \nvoid\nf12 (T x, T y)\n{\n T i;\nfor (i = x; i > y; --i)\n baz (i);\n}\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (results[i] != 1)\t\t\t\t\\\n\t abort ();\t\t\t\t\t\\\n\tresults[i] = 0;\t\t\t\t\t\\\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/nested-1.C", "omp_pragma_line": "#pragma omp parallel for num_threads(2) shared (i)", "context_chars": 100, "text": "// { dg-do run }\n\nextern \"C\" void abort(void);\n#define N 1000\n\nint foo()\n{\n int i = 0, j;\n\n for (j = 0; j < N; ++j)\n {\n #pragma omp parallel num_threads(1) shared (i)\n {\n\t#pragma omp atomic\n\ti++;\n }\n } #pragma omp parallel for num_threads(2) shared (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-9.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \\", "context_chars": 100, "text": "o (int (*&x)[3][N], int *y, long (&w)[1][N])\n{\n unsigned long long a[9] = {};\n short b[5] = {};\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:N]) reduction(max:b)\n for 
(int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == N)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[N] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-9.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \\", "context_chars": 100, "text": "bb) {}\n __attribute__((noinline, noclone)) void foo ();\n};\n\ntemplate \nvoid\nS::foo ()\n{\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:N]) reduction(max:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == N)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[N] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/target-2.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": ", er[x:x]) \\\n\t\t map(to: fr[0:x], gr[0:x], hr[2 * x:x], ir[2 * x:x]) \\\n\t\t map(tofrom: s)\n for (j = 0; j < x; j++)\n\ts += br[j] * cr[j] + dr[x + j] + er[x + j]\n\t + fr[j] + gr[j] + hr[2 * x + j] + ir[2 * x + j];\n return s;\n}\n\nint\nmain ()\n{\n double d[1024];\n double ebuf[1024];\n double *e = ebuf;\n fn1 (br, cr, 128);\n fn1 (d + 128, e + 128, 128);\n fn1 (fr, gr, 128);\n double h = fn2 (128, d, e);\n if (h != 20416.0)\n abort ();\n return 0;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-7.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\", "context_chars": 100, "text": "p7];\n for (int i = 0; i < p7 + 4; i++)\n {\n if (i < p7)\n\tb[i] = -6;\n a[i] = 0;\n }\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) reduction(max:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-7.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], 
z[:p3]) \\", "context_chars": 100, "text": "nt, long, short);\n};\n\nvoid\nS::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7)\n{\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) reduction(max:b[0:p7])\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-8.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\", "context_chars": 100, "text": " a[p7 + 4];\n short bb[p7];\n short (&b)[p7] = bb;\n for (int i = 0; i < p7; i++)\n bb[i] = -6;\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-8.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:2], z[:p3]) \\", "context_chars": 100, "text": "nt, long, short);\n};\n\nvoid\nS::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7)\n{\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-10.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2 + N - 2], z[:p3]) \\", "context_chars": 100, "text": " a[p7 + 4];\n short bb[p7];\n short (&b)[p7] = bb;\n for (int i = 0; i < p7; i++)\n bb[i] = -6;\n reduction(*:y[:p4]) reduction(|:a[:p5 - N + 2]) \\\n\t\t\t reduction(&:w[0:p6 - 3 + N][:p6]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 
N)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[N].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2 + N - 2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-10.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:N], z[:p3 + N - 2]) \\", "context_chars": 100, "text": "\n\ntemplate \nvoid\nS::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7)\n{\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 3 + N][:p6]) reduction(maxb:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == N)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[N].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:N], z[:p3 + N - 2]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "de \n\nint\ntest1 ()\n{\n short int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof 
(buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n 
abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof 
(buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n 
p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 
&& i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', 
sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i 
= 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i 
< 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; 
i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p 
>= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for 
schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n 
return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n 
for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n 
for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if 
(buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": 
"#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * 
((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 
0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort 
();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p 
+= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > 
&buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset 
(buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = 
&buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4)", "context_chars": 100, "text": "int a[256];\n\n__attribute__((noinline, noclone)) int\nf1 (int i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1)", "context_chars": 100, "text": ";\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int &\nf2 (short int 
&i, char k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k)", "context_chars": 100, "text": ";\n }\n return i;\n}\n\ntemplate \n__attribute__((noinline, noclone)) T\nf3 (T i, T k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(static, 3)", "context_chars": 100, "text": " 4;\n }\n return i;\n}\n\ntemplate \n__attribute__((noinline, noclone)) T &\nf4 (T &i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(static, 5)", "context_chars": 100, "text": " 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf5 (short int i, char &k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(static, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(static, 7)", "context_chars": 100, "text": " \n__attribute__((noinline, noclone)) long long int\nf6 (long long int i, long long int k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(static, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(dynamic, 3)", "context_chars": 100, "text": "a[i] = j;\n i += 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf7 (int &i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)", "context_chars": 100, "text": "= 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf8 (short int i, char k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(dynamic, 7)", "context_chars": 100, "text": "rn i;\n}\n\n__attribute__((noinline, noclone)) long long int\nf9 (long long int i, long long int k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(dynamic, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4)", "context_chars": 
100, "text": "eturn i;\n}\n\ntemplate \n__attribute__((noinline, noclone)) T &\nf10 (T &i, long &step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1)", "context_chars": 100, "text": " return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf11 (short int i, char k, char step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k)", "context_chars": 100, "text": "attribute__((noinline, noclone)) long long int\nf12 (long long int i, long long int k, int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(static, 3)", "context_chars": 100, "text": "4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf13 (int &i, long long int step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(static, 5)", "context_chars": 100, "text": "eturn i;\n}\n\n__attribute__((noinline, noclone)) short int\nf14 (short int &i, char &k, int &step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(static, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(static, 7)", "context_chars": 100, "text": "bute__((noinline, noclone)) long long int\nf15 (long long int i, long long int k, long int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(static, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(dynamic, 3)", "context_chars": 100, "text": " 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf16 (int i, long long int step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)", "context_chars": 100, "text": " return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf17 (short int i, char k, int step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/linear-1.C", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(dynamic, 7)", "context_chars": 100, "text": " i;\n}\n\ntemplate \n__attribute__((noinline, noclone)) T\nf18 (T i, T k, long int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(dynamic, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\nfor (std::basic_string::iterator i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\n std::basic_string::iterator i;\n#pragma omp parallel for private(i)\n for (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " &x,\n const std::basic_string::iterator &y)\n{\n std::basic_string::iterator i;\nfor (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\n#pragma omp parallel for schedule (dynamic, 6)\n for (std::basic_string::iterator i = x; i <= y; i = i + 9 - 8)\n baz (i);\n} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 6)", "context_chars": 100, "text": "const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\nfor (std::basic_string::iterator i = x; i <= y; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\n std::basic_string::iterator i;\n#pragma omp parallel for lastprivate(i)\n for (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n} #pragma omp parallel for schedule (dynamic, 6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for lastprivate(i)", "context_chars": 100, "text": " &x,\n const std::basic_string::iterator &y)\n{\n std::basic_string::iterator i;\nfor (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\n#pragma omp parallel for schedule (static, 10)\n for (std::basic_string::iterator i = x + 2000 - 64;\n i > y + 10; i -= 10)\n baz (i);\n} #pragma omp parallel for lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 10)", "context_chars": 100, "text": "const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\nfor (std::basic_string::iterator i = x + 2000 - 64;\n i > y + 10; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\n#pragma omp parallel for schedule 
(runtime)\n for (std::basic_string::iterator i = x + 2000 - 64;\n i > y + 10; i = i - 12 + 2)\n {\n std::basic_string::iterator j = i + N;\n baz (j);\n }\n} #pragma omp parallel for schedule (static, 10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\nfor (std::basic_string::iterator i = x + 2000 - 64;\n i > y + 10; i = i - 12 + 2)\n {\n std::basic_string::iterator j = i + N;\n baz (j);\n } #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 6)", "context_chars": 100, "text": "const std::basic_string::iterator &x,\n const std::basic_string::iterator &y)\n{\nfor (i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n std::basic_string::iterator i;\n#pragma omp parallel for schedule (dynamic, 40)\n for (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n} #pragma omp parallel for schedule (dynamic, 6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 40)", "context_chars": 100, "text": "baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n std::basic_string::iterator i;\nfor (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const typename std::basic_string::iterator &x,\n const typename std::basic_string::iterator &y)\n{\n#pragma omp parallel for schedule (static, 25)\n for (typename std::basic_string::iterator i = x; i <= y; i = i + N)\n baz (i);\n} #pragma omp parallel for schedule (dynamic, 40)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 25)", "context_chars": 100, "text": "typename std::basic_string::iterator &x,\n const typename std::basic_string::iterator &y)\n{\nfor (typename std::basic_string::iterator i = x; i <= y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const typename std::basic_string::iterator &x,\n const typename std::basic_string::iterator &y)\n{\n typename std::basic_string::iterator i;\n#pragma omp parallel for\n for (i = x; i > y; i = i + N)\n baz (i);\n} #pragma omp parallel for schedule (static, 25)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const typename std::basic_string::iterator &y)\n{\n typename std::basic_string::iterator i;\nfor (i = x; i > y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf11 (const T &x, const T &y)\n{\n#pragma omp parallel\n {\n#pragma omp for nowait schedule (static, 2)\n for (T i = x; i <= y; i += 3)\n baz (i);\n#pragma omp single\n {\n T j = y + 3;\n baz (j);\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 130)", "context_chars": 100, "text": "y + 3;\n baz (j);\n }\n }\n}\n\ntemplate \nvoid\nf12 (const T &x, const T 
&y)\n{\n T i;\nfor (i = x; i > y; --i)\n baz (i);\n}\n\ntemplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\n#pragma omp parallel for schedule (runtime)\n for (T i = x; i <= y + N; i += N)\n baz (i);\n }\n} #pragma omp parallel for schedule (dynamic, 130)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "emplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\nfor (T i = x; i <= y + N; i += N)\n baz (i);\n }\n};\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (a[i] != L'a' + i + 1)\t\t\t\\\n\t std::abort ();\t\t\t\t\\\n\ta[i] = L'a' + i;\t\t\t\t\\\n } #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-3.C", "omp_pragma_line": "#pragma omp parallel for private (a) reduction(|:R::r)", "context_chars": 100, "text": "tual public T { A () : b(c) {} int a; int &b; void m1 (); };\nint d[64];\n\nvoid\nA::m1 ()\n{\n r = 0;\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd linear (R::r)\n for (R::r = 0; r < 32; R::r++)\n d[r + 8] |= 1;\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n #pragma omp parallel for lastprivate (t)\n for (T::t = 0; t < 32; t += 3)\n d[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (t)\n for (t = 0; t < 32; t++)\n d[T::t + 9] |= 4;\n if (t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n d[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; t < 7; t += 2)\n for (a = 0; A::a < 8; a++)\n d[((t << 2) | a) + 3] |= 16;\n if (t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n d[((t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 
4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n d[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for private (a) reduction(|:R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-3.C", "omp_pragma_line": "#pragma omp parallel for simd linear (R::r)", "context_chars": 100, "text": "r (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n for (R::r = 0; r < 32; R::r++)\n d[r + 8] |= 1;\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n #pragma omp parallel for lastprivate (t)\n for (T::t = 0; t < 32; t += 3)\n d[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (t)\n for (t = 0; t < 32; t++)\n d[T::t + 9] |= 4;\n if (t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n d[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; t < 7; t += 2)\n for (a = 0; A::a < 8; a++)\n d[((t << 2) | a) + 3] |= 16;\n if (t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n d[((t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n d[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
(16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for simd linear (R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-3.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (t)", "context_chars": 100, "text": "t i = 0; i < 64; i++)\n if (d[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0))\n __builtin_abort ();\n for (T::t = 0; t < 32; t += 3)\n d[T::t + 2] |= 2;\n if (T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp simd linear (t)\n for (t = 0; t < 32; t++)\n d[T::t + 9] |= 4;\n if (t != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n r = 0;\n #pragma omp parallel for reduction(|:r)\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n d[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; t < 7; t += 2)\n for (a = 0; A::a < 8; a++)\n d[((t << 2) | a) + 3] |= 16;\n if (t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n d[((t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n d[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for lastprivate (t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-3.C", "omp_pragma_line": "#pragma omp parallel for reduction(|:r)", "context_chars": 100, "text": " 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 
4 : 0)))\n __builtin_abort ();\n r = 0;\n for (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for simd\n for (R::r = 0; r < 32; R::r += 2)\n d[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; t < 7; t += 2)\n for (a = 0; A::a < 8; a++)\n d[((t << 2) | a) + 3] |= 16;\n if (t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n d[((t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n d[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for reduction(|:r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-3.C", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "r (a = 0; A::a < 31; a += 2)\n r |= (1 << A::a);\n if (r != 0x55555555)\n __builtin_abort ();\n for (R::r = 0; r < 32; R::r += 2)\n d[r + 8] |= 8;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)))\n __builtin_abort ();\n #pragma omp simd collapse(2)\n for (T::t = 0; t < 7; t += 2)\n for (a = 0; A::a < 8; a++)\n d[((t << 2) | a) + 3] |= 16;\n if (t != 8 || A::a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 16 : 0)))\n __builtin_abort ();\n T::t = 32;\n a = 16;\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd collapse(2)\n for (t = 0; T::t < 7; T::t += 2)\n for (A::a = 0; a < 8; A::a++)\n d[((t << 2) | A::a) + 3] |= 32;\n if (T::t != 8 || a != 8)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
(16 | 32) : 0)))\n __builtin_abort ();\n #pragma omp parallel\n #pragma omp single\n #pragma omp taskloop simd\n for (R::r = 0; r < 31; R::r += 2)\n d[r + 8] |= 64;\n if (r != 32)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0)\n\t\t | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0)\n\t\t | ((i >= 9 && i < 32 + 9) ? 4 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n a.m1 ();\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr43893.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:c)", "context_chars": 100, "text": "}\n\nextern \"C\" void abort ();\n\ntemplate \nvoid\nf1 ()\n{\n int c;\n T i;\n c = 0;\nfor (i = M; i < N; i++)\n c++;\n if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf2 ()\n{\n int c;\n T i;\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = M; i <= N; i++)\n c++;\n if (c != 1)\n abort ();\n} #pragma omp parallel for reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr43893.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:c)", "context_chars": 100, "text": " if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf2 ()\n{\n int c;\n T i;\n c = 0;\nfor (i = M; i <= N; i++)\n c++;\n if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf3 ()\n{\n int c;\n T i;\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = M; i > N; i--)\n c++;\n if (c != 1)\n abort ();\n} #pragma omp parallel for reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr43893.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:c)", "context_chars": 100, "text": " if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf3 ()\n{\n int c;\n T i;\n c = 0;\nfor (i = M; i > N; i--)\n c++;\n if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf4 ()\n{\n int c;\n T i;\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = M; i >= N; i--)\n c++;\n if (c != 1)\n abort ();\n} #pragma omp parallel for reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr43893.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:c)", "context_chars": 100, "text": " if (c != 1)\n abort ();\n}\n\ntemplate \nvoid\nf4 ()\n{\n int c;\n T i;\n c = 0;\nfor (i = M; i >= N; i--)\n c++;\n if (c != 1)\n abort ();\n}\n\nint\nmain ()\n{\n int c;\n unsigned int i;\n int j;\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = 0; i < 1; i++)\n c++;\n if (c != 1)\n abort ();\n f1 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = 0; i <= 0; i++)\n c++;\n if (c != 1)\n abort ();\n f2 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (j = - __INT_MAX__ - 1; j < - __INT_MAX__; j++)\n c++;\n if (c != 1)\n abort ();\n f1 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (j = - __INT_MAX__ - 1; j <= - __INT_MAX__ - 1; j++)\n c++;\n if (c != 1)\n abort ();\n f2 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = 2U * __INT_MAX__ + 1; i > 2U * __INT_MAX__; i--)\n c++;\n if (c != 1)\n abort ();\n f3 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (i = 2U * __INT_MAX__ + 1; i >= 2U * __INT_MAX__ + 1; i--)\n c++;\n if (c != 1)\n abort ();\n f4 ();\n c = 0;\n#pragma omp parallel 
for reduction(+:c)\n for (j = __INT_MAX__; j > __INT_MAX__ - 1; j--)\n c++;\n if (c != 1)\n abort ();\n f3 ();\n c = 0;\n#pragma omp parallel for reduction(+:c)\n for (j = __INT_MAX__; j >= __INT_MAX__; j--)\n c++;\n if (c != 1)\n abort ();\n f4 ();\n return 0;\n} #pragma omp parallel for reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for private (a) reduction(|:w)", "context_chars": 100, "text": " &, const I &); };\n\nint d[64];\n\nvoid\nA::m1 (const I &x, const I &y)\n{\n int w = 0;\n for (a = x; A::a < y - 33; a += 2)\n w |= (1 << *A::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for lastprivate (t)\n for (T::t = x; t < y - 32; t += 3)\n d[*T::t + 2] |= 1;\n if (*T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = x; A::a < y - 33; a += 2)\n w |= (1 << *A::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (T::t = x; t < y - 57; t += 2)\n for (a = x; A::a < y - 56; a++)\n d[((*t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n}\n\ntemplate \nvoid\nB::m2 (const Q &u, const Q &v, const I &x, const I &y)\n{\n int w = 0;\n #pragma omp parallel for private (a) reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for lastprivate (U::t)\n for (U::t = u; U::t < v - 32; U::t += 3)\n d[*U::t + 2] |= 1;\n if (*U::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
4 : 0)))\n __builtin_abort ();\n} #pragma omp parallel for private (a) reduction(|:w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (t)", "context_chars": 100, "text": "= x; A::a < y - 33; a += 2)\n w |= (1 << *A::a);\n if (w != 0x55555555)\n __builtin_abort ();\n for (T::t = x; t < y - 32; t += 3)\n d[*T::t + 2] |= 1;\n if (*T::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = x; A::a < y - 33; a += 2)\n w |= (1 << *A::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (T::t = x; t < y - 57; t += 2)\n for (a = x; A::a < y - 56; a++)\n d[((*t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n}\n\ntemplate \nvoid\nB::m2 (const Q &u, const Q &v, const I &x, const I &y)\n{\n int w = 0;\n #pragma omp parallel for private (a) reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for lastprivate (U::t)\n for (U::t = u; U::t < v - 32; U::t += 3)\n d[*U::t + 2] |= 1;\n if (*U::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n} #pragma omp parallel for lastprivate (t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for reduction(|:w)", "context_chars": 100, "text": "(d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n for (a = x; A::a < y - 33; a += 2)\n w |= (1 << *A::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 
2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (T::t = x; t < y - 57; t += 2)\n for (a = x; A::a < y - 56; a++)\n d[((*t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n}\n\ntemplate \nvoid\nB::m2 (const Q &u, const Q &v, const I &x, const I &y)\n{\n int w = 0;\n #pragma omp parallel for private (a) reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for lastprivate (U::t)\n for (U::t = u; U::t < v - 32; U::t += 3)\n d[*U::t + 2] |= 1;\n if (*U::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n} #pragma omp parallel for reduction(|:w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for private (a) reduction(|:w)", "context_chars": 100, "text": "pename Q>\nvoid\nB::m2 (const Q &u, const Q &v, const I &x, const I &y)\n{\n int w = 0;\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp parallel for lastprivate (U::t)\n for (U::t = u; U::t < v - 32; U::t += 3)\n d[*U::t + 2] |= 1;\n if (*U::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
4 : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n int b[128];\n for (int i = 0; i < 128; i++)\n b[i] = i - 32;\n a.m1 (&b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B > c;\n c.m2 (&b[32], &b[96], &b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B d;\n d.m2 (&b[32], &b[96], &b[32], &b[96]);\n} #pragma omp parallel for private (a) reduction(|:w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (U::t)", "context_chars": 100, "text": "= u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n for (U::t = u; U::t < v - 32; U::t += 3)\n d[*U::t + 2] |= 1;\n if (*U::t != 33)\n __builtin_abort ();\n for (int i = 0; i < 64; i++)\n if (d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n #pragma omp parallel for reduction(|:w)\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 4 : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n int b[128];\n for (int i = 0; i < 128; i++)\n b[i] = i - 32;\n a.m1 (&b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B > c;\n c.m2 (&b[32], &b[96], &b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B d;\n d.m2 (&b[32], &b[96], &b[32], &b[96]);\n} #pragma omp parallel for lastprivate (U::t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-5.C", "omp_pragma_line": "#pragma omp parallel for reduction(|:w)", "context_chars": 100, "text": "(d[i] != ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0))\n __builtin_abort ();\n w = 0;\n for (a = u; B::a < v - 33; a += 2)\n w |= (1 << *B::a);\n if (w != 0x55555555)\n __builtin_abort ();\n #pragma omp taskloop\n for (R::r = x; r < y - 32; R::r += 2)\n d[*r + 8] |= 2;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)))\n __builtin_abort ();\n #pragma omp taskloop collapse(2)\n for (U::t = u; U::t < v - 57; U::t += 2)\n for (a = u; B::a < v - 56; a++)\n d[((*U::t << 2) | *a) + 3] |= 4;\n for (int i = 0; i < 64; i++)\n if (d[i] != (((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 1 : 0)\n\t\t | ((i >= 8 && i < 32 + 8 && (i & 1) == 0) ? 2 : 0)\n\t\t | ((i >= 3 && i < 32 + 3) ? 
4 : 0)))\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n A a;\n int b[128];\n for (int i = 0; i < 128; i++)\n b[i] = i - 32;\n a.m1 (&b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B > c;\n c.m2 (&b[32], &b[96], &b[32], &b[96]);\n for (int i = 0; i < 64; i++)\n d[i] = 0;\n B d;\n d.m2 (&b[32], &b[96], &b[32], &b[96]);\n} #pragma omp parallel for reduction(|:w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/simd-8.C", "omp_pragma_line": "#pragma omp parallel for simd aligned(a : 32) reduction(+:s) \\", "context_chars": 100, "text": "int:omp_out += omp_in)\n\n__attribute__((noinline, noclone)) int\nfoo ()\n{\n int i, u = 0;\n S s, t;\n reduction(foo:t, u)\n for (i = 0; i < 1024; i++)\n {\n int x = a[i];\n s.s += x;\n t.s += x;\n u += x;\n } #pragma omp parallel for simd aligned(a : 32) reduction(+:s) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n{\n if (type != 1)\n abort ();\n#pragma omp atomic\n cnt++;\n}\n\ntemplate \nvoid\nf1 ()\n{\nfor (auto i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\ntemplate \nvoid\nf2 ()\n{\n#pragma omp parallel for\n for (auto i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "p parallel for\n for (auto i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\ntemplate \nvoid\nf2 ()\n{\nfor (auto i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n}\n\nvoid\nf3 ()\n{\n#pragma omp parallel for\n for (auto i = 0; i < 10; i++)\n f0 (i, 0);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "agma omp parallel for\n for (auto i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n}\n\nvoid\nf3 ()\n{\nfor (auto i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\nconst char *p = \"abcdefghij\";\n\ntemplate \nvoid\nf4 ()\n{\n#pragma omp parallel for\n for (auto i = p; i < p + 10; i++)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0; i < 10; i++)\n f0 (i, 0);\n}\n\nconst char *p = \"abcdefghij\";\n\ntemplate \nvoid\nf4 ()\n{\nfor (auto i = p; i < p + 10; i++)\n f0 (i, 1);\n}\n\ntemplate \nvoid\nf5 ()\n{\n#pragma omp parallel for\n for (auto i = T (p); i < T (p + 10); i++)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rallel for\n for (auto i = p; i < p + 10; i++)\n f0 (i, 1);\n}\n\ntemplate \nvoid\nf5 ()\n{\nfor (auto i = T (p); i < T (p + 10); i++)\n f0 (i, 1);\n}\n\nvoid\nf6 ()\n{\n#pragma omp parallel for\n for (auto i = p; i < p + 10; i++)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-7.C", "omp_pragma_line": "#pragma omp parallel for", 
"context_chars": 100, "text": "#pragma omp parallel for\n for (auto i = T (p); i < T (p + 10); i++)\n f0 (i, 1);\n}\n\nvoid\nf6 ()\n{\nfor (auto i = p; i < p + 10; i++)\n f0 (i, 1);\n}\n\nint\nmain ()\n{\n f1 ();\n if (cnt != 10)\n abort ();\n f2 ();\n if (cnt != 20)\n abort ();\n f3 ();\n if (cnt != 30)\n abort ();\n f4 ();\n if (cnt != 40)\n abort ();\n f5 ();\n if (cnt != 50)\n abort ();\n f6 ();\n if (cnt != 60)\n abort ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i < 0 || *i >= 2000)\n abort ();\n results[*i]++;\n}\n\nvoid\nf1 (const I &x, const I &y)\n{\nfor (I i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for private(i)\n for (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "nt> i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const I &x, const I &y)\n{\n I i;\nfor (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x; i <= y; i = i + 9 - 8)\n baz (i);\n} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const I &x, const I &y)\n{\nfor (I i = x; i <= y; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for lastprivate(i)\n for (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for lastprivate(i)", "context_chars": 100, "text": "- 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const I &x, const I &y)\n{\n I i;\nfor (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x + 2000 - 64; i > y + 10; i -= 10)\n baz (i);\n} #pragma omp parallel for lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (const I &x, const I &y)\n{\nfor (I i = x + 2000 - 64; i > y + 10; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x + 2000 - 64; i > y + 10; i = i - 12 + 2)\n {\n I j = i + N;\n baz (j);\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i > y + 10; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const I &x, const I &y)\n{\nfor (I i = x + 2000 - 64; i > y + 10; i = i - 12 + 2)\n {\n I j = i + N;\n baz (j);\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " N;\n baz (j);\n }\n}\n\ntemplate \nvoid\nf7 (I i, const I &x, const I &y)\n{\nfor (i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n I i;\n#pragma omp parallel for\n for (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n I i;\nfor (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x; i <= y; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const I &x, const I &y)\n{\nfor (I i = x; i <= y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for\n for (i = x; i > y; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const I &x, const I &y)\n{\n I i;\nfor (i = x; i > y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf11 (const T &x, const T &y)\n{\n#pragma omp parallel\n {\n#pragma omp for nowait\n for (T i = x; i <= y; i += 3)\n baz (i);\n#pragma omp single\n {\n T j = y + 3;\n baz (j);\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y + 3;\n baz (j);\n }\n }\n}\n\ntemplate \nvoid\nf12 (const T &x, const T &y)\n{\n T i;\nfor (i = x; i > y; --i)\n baz (i);\n}\n\ntemplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\n#pragma omp parallel for\n for (T i = x; i <= y + N; i += N)\n baz (i);\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "emplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\nfor (T i = x; i <= y + N; i += N)\n baz (i);\n }\n};\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (results[i] != 1)\t\t\t\t\\\n\t abort ();\t\t\t\t\t\\\n\tresults[i] = 0;\t\t\t\t\t\\\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-2.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (a, T::t, b, f) lastprivate (A::a, r, T::t, n)", "context_chars": 100, "text": " __builtin_abort ();\n }\n bool f = false;\n a = -5;\n b = -4;\n r = -3;\n T::t = -2;\n int n;\n for (int i = 0; i < omp_get_num_threads (); i++)\n 
{\n int q = omp_get_thread_num ();\n if (!f)\n\t{\n\t if (A::a != -5 || A::b != -4 || T::t != -2)\n\t __builtin_abort ();\n\t}\n else if (a != q || b != 2 * q || r != 3 * q || T::t != 4 * q)\n\t__builtin_abort ();\n take (a, r, T::t, b);\n A::a = q;\n A::b = 2 * q;\n R::r = 3 * q;\n T::t = 4 * q;\n n = q;\n f = true;\n } #pragma omp parallel for firstprivate (a, T::t, b, f) lastprivate (A::a, r, T::t, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: A::a, T::t, b, R::r)", "context_chars": 100, "text": " * n || T::t != 4 * n)\n\t__builtin_abort ();\n }\n a = 0;\n b = 0;\n R::r = 0;\n T::t = 0;\n for (int i = 0; i < 30; i++)\n {\n a += i;\n A::b += 2 * i;\n r += 3 * i;\n T::t += 4 * i;\n take (a, b, r, T::t);\n } #pragma omp parallel for reduction (+: A::a, T::t, b, R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n{\n if (type != 1)\n abort ();\n#pragma omp atomic\n cnt++;\n}\n\ntemplate \nvoid\nf1 ()\n{\nfor (int i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\ntemplate \nvoid\nf2 ()\n{\n#pragma omp parallel for\n for (T i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "mp parallel for\n for (int i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\ntemplate \nvoid\nf2 ()\n{\nfor (T i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n}\n\nvoid\nf3 ()\n{\n#pragma omp parallel for\n for (int i = 0; i < 10; i++)\n f0 (i, 0);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#pragma omp parallel for\n for (T i = T (0); i < T (10); i += T (1))\n f0 (i, 0);\n}\n\nvoid\nf3 ()\n{\nfor (int i = 0; i < 10; i++)\n f0 (i, 0);\n}\n\nconst char *p = \"abcdefghij\";\n\ntemplate \nvoid\nf4 ()\n{\n#pragma omp parallel for\n for (const char *i = p; i < p + 10; i += 1)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0; i < 10; i++)\n f0 (i, 0);\n}\n\nconst char *p = \"abcdefghij\";\n\ntemplate \nvoid\nf4 ()\n{\nfor (const char *i = p; i < p + 10; i += 1)\n f0 (i, 1);\n}\n\ntemplate \nvoid\nf5 ()\n{\n#pragma omp parallel for\n for (T i = T (p); i < T (p + 10); i += 1)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n for (const char *i = p; i < p + 10; i += 1)\n f0 (i, 1);\n}\n\ntemplate \nvoid\nf5 ()\n{\nfor (T i = T (p); i < T (p + 10); i += 1)\n f0 (i, 1);\n}\n\nvoid\nf6 ()\n{\n#pragma omp parallel for\n for (const char *i = p; i < p + 10; i++)\n f0 (i, 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-6.C", 
"omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#pragma omp parallel for\n for (T i = T (p); i < T (p + 10); i += 1)\n f0 (i, 1);\n}\n\nvoid\nf6 ()\n{\nfor (const char *i = p; i < p + 10; i++)\n f0 (i, 1);\n}\n\nint\nmain ()\n{\n f1 ();\n if (cnt != 10)\n abort ();\n f2 ();\n if (cnt != 20)\n abort ();\n f3 ();\n if (cnt != 30)\n abort ();\n f4 ();\n if (cnt != 40)\n abort ();\n f5 ();\n if (cnt != 50)\n abort ();\n f6 ();\n if (cnt != 60)\n abort ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i < 0 || *i >= 2000)\n abort ();\n results[*i]++;\n}\n\nvoid\nf1 (const I &x, const I &y)\n{\nfor (I i = x; y >= i; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for private(i)\n for (i = x; y - 1 > i; i = 1 - 6 + 7 + i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "nt> i = x; y >= i; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const I &x, const I &y)\n{\n I i;\nfor (i = x; y - 1 > i; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x; y >= i; i = i + 9 - 8)\n baz (i);\n} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const I &x, const I &y)\n{\nfor (I i = x; y >= i; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for lastprivate(i)\n for (i = x + 2000 - 64; y + 10 < i; --i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for lastprivate(i)", "context_chars": 100, "text": "- 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const I &x, const I &y)\n{\n I i;\nfor (i = x + 2000 - 64; y + 10 < i; --i)\n baz (i);\n}\n\nvoid\nf5 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x + 2000 - 64; y + 10 < i; i -= 10)\n baz (i);\n} #pragma omp parallel for lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (i = x + 2000 - 64; y + 10 < i; --i)\n baz (i);\n}\n\nvoid\nf5 (const I &x, const I &y)\n{\nfor (I i = x + 2000 - 64; y + 10 < i; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x + 2000 - 64; y + 10 < i; i = i - 12 + 2)\n {\n I j = i + N;\n baz (j);\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " y + 10 < i; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const I &x, const I &y)\n{\nfor (I i = x + 2000 - 64; y + 10 < i; i = i - 12 + 2)\n {\n I j = i + N;\n baz (j);\n } #pragma omp 
parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " N;\n baz (j);\n }\n}\n\ntemplate \nvoid\nf7 (I i, const I &x, const I &y)\n{\nfor (i = x - 10; y + 10 >= i; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n I i;\n#pragma omp parallel for\n for (i = j.begin (); j.end () + N >= i; i += 2)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(i = x - 10; y + 10 >= i; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n I i;\nfor (i = j.begin (); j.end () + N >= i; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const I &x, const I &y)\n{\n#pragma omp parallel for\n for (I i = x; y >= i; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ">= i; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const I &x, const I &y)\n{\nfor (I i = x; y >= i; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for\n for (i = x; y < i; i = i + N)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const I &x, const I &y)\n{\n I i;\nfor (i = x; y < i; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf11 (const T &x, const T &y)\n{\n#pragma omp parallel\n {\n#pragma omp for nowait\n for (T i = x; y >= i; i += 3)\n baz (i);\n#pragma omp single\n {\n T j = y + 3;\n baz (j);\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y + 3;\n baz (j);\n }\n }\n}\n\ntemplate \nvoid\nf12 (const T &x, const T &y)\n{\n T i;\nfor (i = x; y < i; --i)\n baz (i);\n}\n\ntemplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\n#pragma omp parallel for\n for (T i = x; y + N >= i; i += N)\n baz (i);\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "emplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\nfor (T i = x; y + N >= i; i += N)\n baz (i);\n }\n};\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (results[i] != 1)\t\t\t\t\\\n\t abort ();\t\t\t\t\t\\\n\tresults[i] = 0;\t\t\t\t\t\\\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reference-1.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (g, k) lastprivate (g, k)", "context_chars": 100, "text": "2; j++)\n\t{\n\t g = j;\n\t k = 3 * j;\n\t}\n }\n if (g != 31 || k != 31 * 3)\n __builtin_abort ();\n for (int j = 0; j < 32; j++)\n {\n 
if (g != 31 || k != 31 * 3)\n\t__builtin_abort ();\n if (j == 31)\n\t{\n\t g = 29;\n\t k = 138;\n\t}\n } #pragma omp parallel for firstprivate (g, k) lastprivate (g, k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-1.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (a, T::t, b, f) lastprivate (A::a, r, t, n)", "context_chars": 100, "text": "\n __builtin_abort ();\n }\n bool f = false;\n a = -5;\n b = -4;\n r = -3;\n t = -2;\n int n;\n for (int i = 0; i < omp_get_num_threads (); i++)\n {\n int q = omp_get_thread_num ();\n if (!f)\n\t{\n\t if (A::a != -5 || A::b != -4 || T::t != -2)\n\t __builtin_abort ();\n\t}\n else if (a != q || b != 2 * q || r != 3 * q || t != 4 * q)\n\t__builtin_abort ();\n take (a, r, t, b);\n A::a = q;\n A::b = 2 * q;\n R::r = 3 * q;\n T::t = 4 * q;\n n = q;\n f = true;\n } #pragma omp parallel for firstprivate (a, T::t, b, f) lastprivate (A::a, r, t, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-1.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: A::a, t, b, R::r)", "context_chars": 100, "text": " n || r != 3 * n || t != 4 * n)\n\t__builtin_abort ();\n }\n a = 0;\n b = 0;\n R::r = 0;\n t = 0;\n for (int i = 0; i < 30; i++)\n {\n a += i;\n A::b += 2 * i;\n r += 3 * i;\n T::t += 4 * i;\n take (a, b, r, t);\n } #pragma omp parallel for reduction (+: A::a, t, b, R::r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr49043.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:r)", "context_chars": 100, "text": "dg-options \"-std=c++0x\" }\n// { dg-do run }\n\nextern \"C\" void abort ();\n\nint\nmain ()\n{\n int r = 0;\n for (int a = 0; a < 10; ++a)\n {\n\tauto func = [=] () { return a; };\n\tr += func ();\n } #pragma omp parallel for reduction (+:r)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "de \n\nint\ntest1 ()\n{\n short int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n 
for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', 
sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 
7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n 
abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof 
(buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for 
schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 
53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest2 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, 
'\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * 
((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] 
= -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule 
(static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for 
schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; 
i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = 
&buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", 
"context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest3 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, 
'\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n 
abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if 
(buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i 
= 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * 
((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = 
&buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "t ();\n return 0;\n}\n\nint\ntest4 ()\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 
10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n 
abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} 
#pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-11.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain ()\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/doacross-1.C", "omp_pragma_line": "#pragma omp parallel for ordered(4) lastprivate (i, j, k) schedule(static, 1)", "context_chars": 100, "text": "const I &f, int g, int h,\n I &r1, I &r2, I &r3)\n{\n I i, j, 
k;\n int l;\nfor (i = a; i <= b; i++)\n for (j = c; j < d; j++)\n for (k = e; k < f; k++)\n\tfor (l = g; l < h; l++)\n\t {\n\t #pragma omp ordered depend(sink: i - 1, j, k + 1, l - 2)\n\t baz (i, j, k, l);\n\t if (i > a && k < f - 1 && l > g + 1)\n\t {\n\t\tint m;\n\t\t#pragma omp atomic read\n\t\tm = results[512 * *(i - 1) + 64 * *j + 8 * *(k + 1) + l - 2];\n\t\tif (m == 0)\n\t\t abort ();\n\t }\n\t #pragma omp ordered depend(source)\n\t } #pragma omp parallel for ordered(4) lastprivate (i, j, k) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/doacross-1.C", "omp_pragma_line": "#pragma omp parallel for collapse (1) ordered(4) lastprivate (i, j, k) schedule(static, 2)", "context_chars": 100, "text": " a, int b, int c, int d, int e, int f, int g, int h, int &r1, int &r2, int &r3)\n{\n int i, j, k, l;\nfor (i = a; i <= b; i++)\n for (j = c; j < d; j++)\n for (k = e; k < f; k++)\n\tfor (l = g; l < h; l++)\n\t {\n\t #pragma omp ordered depend(sink: i - 1, j, k + 1, l - 2)\n\t baz (i, j, k, l);\n\t if (i > a && k < f - 1 && l > g + 1)\n\t {\n\t\tint m;\n\t\t#pragma omp atomic read\n\t\tm = results[512 * (i - 1) + 64 * j + 8 * (k + 1) + l - 2];\n\t\tif (m == 0)\n\t\t abort ();\n\t }\n\t #pragma omp ordered depend(source)\n\t } #pragma omp parallel for collapse (1) ordered(4) lastprivate (i, j, k) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/doacross-1.C", "omp_pragma_line": "#pragma omp parallel for collapse (2) ordered(4) lastprivate (i, j, k) schedule(static, 1)", "context_chars": 100, "text": "const I &f, int g, int h,\n I &r1, I &r2, I &r3)\n{\n I i, j, k;\n int l;\nfor (i = a; i <= b; i++)\n for (j = c; j < d; j++)\n for (k = e; k < f; k++)\n\tfor (l = g; l < h; l++)\n\t {\n\t #pragma omp ordered depend(sink: i - 1, j, k + 1, l - 2)\n\t baz (i, j, k, l);\n\t if (i > a && k < f - 1 && l > g + 1)\n\t {\n\t\tint m;\n\t\t#pragma omp atomic read\n\t\tm = results[512 * *(i - 1) + 64 * *j + 8 * *(k + 1) + l - 2];\n\t\tif (m == 0)\n\t\t abort ();\n\t }\n\t #pragma omp ordered depend(source)\n\t } #pragma omp parallel for collapse (2) ordered(4) lastprivate (i, j, k) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/doacross-1.C", "omp_pragma_line": "#pragma omp parallel for collapse (2) ordered(4) lastprivate (i, j, k) schedule(static, 2)", "context_chars": 100, "text": " a, int b, int c, int d, int e, int f, int g, int h, int &r1, int &r2, int &r3)\n{\n int i, j, k, l;\nfor (i = a; i <= b; i++)\n for (j = c; j < d; j++)\n for (k = e; k < f; k++)\n\tfor (l = g; l < h; l++)\n\t {\n\t #pragma omp ordered depend(sink: i - 1, j, k + 1, l - 2)\n\t baz (i, j, k, l);\n\t if (i > a && k < f - 1 && l > g + 1)\n\t {\n\t\tint m;\n\t\t#pragma omp atomic read\n\t\tm = results[512 * (i - 1) + 64 * j + 8 * (k + 1) + l - 2];\n\t\tif (m == 0)\n\t\t abort ();\n\t }\n\t #pragma omp ordered depend(source)\n\t } #pragma omp parallel for collapse (2) ordered(4) lastprivate (i, j, k) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-5.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": "o (int (*&x)[3][2], int *y, long (&w)[1][2])\n{\n unsigned long long a[9] = {};\n short b[5] = {};\n 
reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:2]) reduction(max:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-5.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": " w(w3), z(), a(), b(bb) {}\n __attribute__((noinline, noclone)) void foo ();\n};\n\nvoid\nS::foo ()\n{\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:][:2]) reduction(max:b)\n for (int i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-12.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2 + N - 2], z[t + N:p3]) \\", "context_chars": 100, "text": " a[p7 + 4];\n short bb[p7];\n short (&b)[p7] = bb;\n for (int i = 0; i < p7; i++)\n bb[i] = -6;\n reduction(*:y[-s:p4]) reduction(|:a[s + 3:p5 - N + 2]) \\\n\t\t\t reduction(&:w[s + 1:p6 - 3 + N][t:p6]) reduction(maxb:b[N:])\n for (int i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[1].t *= 3;\n if ((i & 31) == N)\n\ty[2].t *= 7;\n if ((i & 63) == 3)\n\ty[N + 1].t *= 17;\n z[i / 32 + 2].t += (i & 3);\n if (i < 4)\n\tz[i + N].t += i;\n a[i / 32 + 2].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2 + N - 2], z[t + N:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-12.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2][0:N], z[t + N:p3 + N - 2]) \\", "context_chars": 100, "text": "t N>\nvoid\nS::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7, int s, int t)\n{\n reduction(*:y[-s:p4]) reduction(|:a[s + 3:p5]) \\\n\t\t\t reduction(&:w[s + 1:p6 - 3 + N][t:p6]) reduction(maxb:b[N:])\n for (int i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[1].t *= 3;\n if ((i & 31) == N)\n\ty[2].t *= 7;\n if ((i & 63) == 3)\n\ty[N + 1].t *= 17;\n z[i / 32 + 2].t += (i & 3);\n if (i < 4)\n\tz[i + N].t += i;\n a[i / 32 + 2].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i 
/ 17 * 3));\n if ((i % 23) > b[N])\n\tb[N] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2][0:N], z[t + N:p3 + N - 2]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": "\n i += 3;\n }\n return I (i);\n}\n\nI\nf2 (const I &x, const I &y)\n{\n I i;\nfor (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n return I (i);\n}\n\ntemplate \nI\nf3 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel\n #pragma omp for lastprivate (i)\n for (i = x + 1000 - 64; i <= y - 10; i++)\n baz (i);\n return i;\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": "i);\n return i;\n}\n\ntemplate \nI\nf4 (const I &x, const I &y)\n{\n I i;\nfor (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n return I (i);\n}\n\ntemplate \nI\nf5 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for lastprivate (i)\n for (i = x; i > y + T (6); i--)\n baz (i);\n return i;\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": "urn I (i);\n}\n\ntemplate \nI\nf5 (const I &x, const I &y)\n{\n I i;\nfor (i = x; i > y + T (6); i--)\n baz (i);\n return i;\n}\n\ntemplate \nI\nf6 (const I &x, const I &y)\n{\n I i;\n#pragma omp parallel for lastprivate (i)\n for (i = x - T (7); i > y; i -= T (2))\n baz (i);\n return I (i);\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": "i);\n return i;\n}\n\ntemplate \nI\nf6 (const I &x, const I &y)\n{\n I i;\nfor (i = x - T (7); i > y; i -= T (2))\n baz (i);\n return I (i);\n}\n\ntemplate \nI\nf7 (I i, const I &x, const I &y)\n{\n#pragma omp parallel for lastprivate (i)\n for (i = x - 10; i <= y + 10; i += N)\n baz (i);\n return I (i);\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": ";\n return I (i);\n}\n\ntemplate \nI\nf7 (I i, const I &x, const I &y)\n{\nfor (i = x - 10; i <= y + 10; i += N)\n baz (i);\n return I (i);\n}\n\ntemplate \nI\nf8 (J j)\n{\n I i;\n#pragma omp parallel shared (i)\n #pragma omp for lastprivate (i)\n for (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n return i;\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i9)", "context_chars": 100, "text": ") + N; i += 2)\n baz (i);\n return i;\n}\n\nI i9;\n\ntemplate \nI &\nf9 (J j)\n{\nfor (i9 = j.begin () + N; i9 <= j.end () - N; i9 = i9 - N)\n baz (i9);\n return i9;\n}\n\ntemplate \nI\nf10 (const I &x, const I &y)\n{\n I 
i;\n#pragma omp parallel for lastprivate (i)\n for (i = x; i > y; i = i + N)\n baz (i);\n return i;\n} #pragma omp parallel for lastprivate (i9)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": ");\n return i9;\n}\n\ntemplate \nI\nf10 (const I &x, const I &y)\n{\n I i;\nfor (i = x; i > y; i = i + N)\n baz (i);\n return i;\n}\n\ntemplate \nT\nf11 (T i, const T &x, const T &y)\n{\n#pragma omp parallel\n #pragma omp for lastprivate (i)\n for (i = x + U (2); i <= y + U (1); i = U (2) + U (3) + i)\n baz (i);\n return T (i);\n} #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-5.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (i)", "context_chars": 100, "text": " + i)\n baz (i);\n return T (i);\n}\n\ntemplate \nT\nf12 (const T &x, const T &y)\n{\n T i;\nfor (i = x; i > y; --i)\n baz (i);\n return i;\n}\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (results[i] != 1)\t\t\t\t\\\n\t abort ();\t\t\t\t\t\\\n\tresults[i] = 0;\t\t\t\t\t\\\n } #pragma omp parallel for lastprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr30703.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (a) lastprivate (a) private (b) schedule (static, 1) num_threads (5)", "context_chars": 100, "text": "tomic\n cctor++;\n}\n\nA::~A()\n{\n#pragma omp atomic\n dtor++;\n}\n\nvoid\nfoo (A a, A b)\n{\n int i, j = 0;\nfor (i = 0; i < 5; i++)\n {\n b.i = 5;\n if (a.i != 6)\n\t#pragma omp atomic\n\t j += 1;\n a.i = b.i + i + 6;\n } #pragma omp parallel for firstprivate (a) lastprivate (a) private (b) schedule (static, 1) num_threads (5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-1.C", "omp_pragma_line": "#pragma omp parallel for collapse(4 - 1) schedule(static, 4)", "context_chars": 100, "text": "stdlib.h>\n\nint\nmain ()\n{\n int i, j, k, l = 0;\n int a[3][3][3];\n\n memset (a, '\\0', sizeof (a));\n for (i = 0; i < 2; i++)\n for (j = 0; j < 2; j++)\n\tfor (k = 0; k < 2; k++)\n\t a[i][j][k] = i + j * 4 + k * 16;\n #pragma omp parallel\n {\n #pragma omp for collapse(2) reduction(|:l) private (k)\n\tfor (i = 0; i < 2; i++)\n\t for (j = 0; j < 2; j++)\n\t for (k = 0; k < 2; k++)\n\t if (a[i][j][k] != i + j * 4 + k * 16)\n\t\tl = 1;\n } #pragma omp parallel for collapse(4 - 1) schedule(static, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oid\nf1 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\nfor (std::vector::const_iterator i = x; i <= y; i += 6)\n baz (i);\n}\n\nvoid\nf2 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\n std::vector::const_iterator i;\n#pragma omp parallel for private(i)\n for (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 
100, "text": "iterator &x,\n const std::vector::const_iterator &y)\n{\n std::vector::const_iterator i;\nfor (i = x; i < y - 1; i = 1 - 6 + 7 + i)\n baz (i);\n}\n\ntemplate \nvoid\nf3 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\n#pragma omp parallel for schedule (dynamic, 6)\n for (std::vector::const_iterator i = x; i <= y; i = i + 9 - 8)\n baz (i);\n} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 6)", "context_chars": 100, "text": "oid\nf3 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\nfor (std::vector::const_iterator i = x; i <= y; i = i + 9 - 8)\n baz (i);\n}\n\ntemplate \nvoid\nf4 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\n std::vector::const_iterator i;\n#pragma omp parallel for lastprivate(i)\n for (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n} #pragma omp parallel for schedule (dynamic, 6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for lastprivate(i)", "context_chars": 100, "text": "iterator &x,\n const std::vector::const_iterator &y)\n{\n std::vector::const_iterator i;\nfor (i = x + 2000 - 64; i > y + 10; --i)\n baz (i);\n}\n\nvoid\nf5 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\n#pragma omp parallel for schedule (static, 10)\n for (std::vector::const_iterator i = x + 2000 - 64; i > y + 10; i -= 10)\n baz (i);\n} #pragma omp parallel for lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 10)", "context_chars": 100, "text": "oid\nf5 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\nfor (std::vector::const_iterator i = x + 2000 - 64; i > y + 10; i -= 10)\n baz (i);\n}\n\ntemplate \nvoid\nf6 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\n#pragma omp parallel for schedule (runtime)\n for (std::vector::const_iterator i = x + 2000 - 64;\n i > y + 10; i = i - 12 + 2)\n {\n std::vector::const_iterator j = i + N;\n baz (j);\n }\n} #pragma omp parallel for schedule (static, 10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "oid\nf6 (const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\nfor (std::vector::const_iterator i = x + 2000 - 64;\n i > y + 10; i = i - 12 + 2)\n {\n std::vector::const_iterator j = i + N;\n baz (j);\n } #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 6)", "context_chars": 100, "text": " i,\n const std::vector::const_iterator &x,\n const std::vector::const_iterator &y)\n{\nfor (i = x - 10; i <= y + 10; i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n std::vector::const_iterator i;\n#pragma omp parallel for schedule (dynamic, 40)\n for (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n} #pragma 
omp parallel for schedule (dynamic, 6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 40)", "context_chars": 100, "text": "i += N)\n baz (i);\n}\n\ntemplate \nvoid\nf8 (J j)\n{\n std::vector::const_iterator i;\nfor (i = j.begin (); i <= j.end () + N; i += 2)\n baz (i);\n}\n\ntemplate \nvoid\nf9 (const typename std::vector::const_iterator &x,\n const typename std::vector::const_iterator &y)\n{\n#pragma omp parallel for schedule (static, 25)\n for (typename std::vector::const_iterator i = x; i <= y; i = i + N)\n baz (i);\n} #pragma omp parallel for schedule (dynamic, 40)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 25)", "context_chars": 100, "text": "typename std::vector::const_iterator &x,\n const typename std::vector::const_iterator &y)\n{\nfor (typename std::vector::const_iterator i = x; i <= y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf10 (const typename std::vector::const_iterator &x,\n const typename std::vector::const_iterator &y)\n{\n typename std::vector::const_iterator i;\n#pragma omp parallel for\n for (i = x; i > y; i = i + N)\n baz (i);\n} #pragma omp parallel for schedule (static, 25)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const typename std::vector::const_iterator &y)\n{\n typename std::vector::const_iterator i;\nfor (i = x; i > y; i = i + N)\n baz (i);\n}\n\ntemplate \nvoid\nf11 (const T &x, const T &y)\n{\n#pragma omp parallel\n {\n#pragma omp for nowait schedule (static, 2)\n for (T i = x; i <= y; i += 3)\n baz (i);\n#pragma omp single\n {\n T j = y + 3;\n baz (j);\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 130)", "context_chars": 100, "text": "y + 3;\n baz (j);\n }\n }\n}\n\ntemplate \nvoid\nf12 (const T &x, const T &y)\n{\n T i;\nfor (i = x; i > y; --i)\n baz (i);\n}\n\ntemplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\n#pragma omp parallel for schedule (runtime)\n for (T i = x; i <= y + N; i += N)\n baz (i);\n }\n} #pragma omp parallel for schedule (dynamic, 130)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-3.C", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "emplate \nstruct K\n{\n template \n static void\n f13 (const T &x, const T &y)\n {\nfor (T i = x; i <= y + N; i += N)\n baz (i);\n }\n};\n\n#define check(expr) \\\n for (int i = 0; i < 2000; i++)\t\t\t\\\n if (expr)\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\\\n\tif (results[i] != 1)\t\t\t\t\\\n\t std::abort ();\t\t\t\t\\\n\tresults[i] = 0;\t\t\t\t\t\\\n } #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-11.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\", "context_chars": 100, "text": "][2], int *y, long (&w)[1][2], int s, int t)\n{\n unsigned long long a[9] = 
{};\n short b[5] = {};\n reduction(*:y[-s:3]) reduction(|:a[s + 3:4]) \\\n\t\t\t reduction(&:w[s + 1:][t:2]) reduction(max:b[2:])\n for (int i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[1] *= 3;\n if ((i & 31) == 2)\n\ty[2] *= 7;\n if ((i & 63) == 3)\n\ty[3] *= 17;\n z[i / 32 + 2] += (i & 3);\n if (i < 4)\n\tz[i + 2] += i;\n a[i / 32 + 2] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-11.C", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\", "context_chars": 100, "text": "{}\n __attribute__((noinline, noclone)) void foo (int s, int t);\n};\n\nvoid\nS::foo (int s, int t)\n{\n reduction(*:y[-s:3]) reduction(|:a[s + 3:4]) \\\n\t\t\t reduction(&:w[s + 1:][t:2]) reduction(max:b[2:])\n for (int i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[1] *= 3;\n if ((i & 31) == 2)\n\ty[2] *= 7;\n if ((i & 63) == 3)\n\ty[3] *= 17;\n z[i / 32 + 2] += (i & 3);\n if (i < 4)\n\tz[i + 2] += i;\n a[i / 32 + 2] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr27337.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (ret) lastprivate (ret) \\", "context_chars": 100, "text": ";\n if (omp_get_thread_num () != 0)\n#pragma omp atomic\n n[2]++;\n}\n\nS\nfoo ()\n{\n int i;\n S ret;\n\nschedule (static, 1) num_threads (4)\n for (i = 0; i < 4; i++)\n ret.i += omp_get_thread_num ();\n\n return ret;\n}\n\nS\nbar ()\n{\n int i;\n S ret;\n\n#pragma omp parallel for num_threads (4)\n for (i = 0; i < 4; i++)\n#pragma omp atomic\n ret.i += omp_get_thread_num () + 1;\n\n return ret;\n} #pragma omp parallel for firstprivate (ret) lastprivate (ret) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr27337.C", "omp_pragma_line": "#pragma omp parallel for num_threads (4)", "context_chars": 100, "text": " 0; i < 4; i++)\n ret.i += omp_get_thread_num ();\n\n return ret;\n}\n\nS\nbar ()\n{\n int i;\n S ret;\n\nfor (i = 0; i < 4; i++)\n#pragma omp atomic\n ret.i += omp_get_thread_num () + 1;\n\n return ret;\n}\n\nS x;\n\nint\nmain (void)\n{\n omp_set_dynamic (false);\n x = foo ();\n if (n[0] != 0 || n[1] != 3 || n[2] != 3)\n abort ();\n if (x.i != 118 + 3)\n abort ();\n x = bar ();\n if (n[0] != 0 || n[1] != 3 || n[2] != 3)\n abort ();\n if (x.i != 18 + 0 + 1 + 2 + 3 + 4)\n abort ();\n return 0;\n} #pragma omp parallel for num_threads (4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr39573.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "line))\nbar (int *x)\n{\n #pragma omp atomic\n z += x[2];\n x[2] += x[3];\n}\n\nint\nmain ()\n{\n int i;\nfor (i = 0; i < 65536; i++)\n {\n int x[] =\n\t{\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 
1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n\t};\n bar (x);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/examples-4/target_data-5.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "loat *&v1, float *&v2, int n)\n{\n #pragma omp target map(to: v1[0:n], v2[:n]) map(from: p[0:n])\n for (int i = 0; i < n; i++)\n\tp[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n float *p = new float [N];\n float *p1 = new float [N];\n float *v1 = new float [N];\n float *v2 = new float [N];\n\n init (v1, v2, N);\n\n vec_mult_ref (p, v1, v2, N);\n vec_mult (p1, v1, v2, N);\n\n check (p, p1, N);\n\n delete [] p;\n delete [] p1;\n delete [] v1;\n delete [] v2;\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.hsa.c/tiling-2.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "this BLOCK */\n// - - - - - - - - - - - - - - - - - - - -\n// REPLACE NEXT THREE LINES WITH A BARRIER\nfor (int row=0 ; row < BLOCK_SIZE ; row++) {\n for (int col=0 ; col < BLOCK_SIZE ; col++) {\n// END BARRIER\n// - - - - - - - - - - - - - - - - - - - -\n Cs[row][col] = 0.0;\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.hsa.c/tiling-2.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": " local memory\n// - - - - - - - - - - - - - - - - - - - -\n// REPLACE NEXT THREE LINES WITH A BARRIER\nfor (int row=0 ; row < BLOCK_SIZE ; row++) {\n for (int col=0 ; col < BLOCK_SIZE ; col++) {\n// END BARRIER\n// - - - - - - - - - - - - - - - - - - - -\n C_row = C_row_start + row;\n C_col = C_col_start + col;\n\t\t if ((C_row < M) && (kblock + col < K))\n\t\t As[row][col] = A[(C_row*LDA)+ kblock + col];\n\t\t else\n\t\t As[row][col] = 0;\n\t\t if ((kblock + row < K) && C_col < N)\n\t\t Bs[row][col] = B[((kblock+row)*LDB)+ C_col];\n\t\t else\n\t\t Bs[row][col] = 0;\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.hsa.c/tiling-2.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "s all kblocks\n// - - - - - - - - - - - - - - - - - - - -\n// REPLACE NEXT THREE LINES WITH A BARRIER\nfor (int row=0 ; row < BLOCK_SIZE ; row++) {\n for (int col=0 ; col < BLOCK_SIZE ; col++) {\n// END BARRIER\n// - - - - - - - - - - - - - - - - - - - 
-\n for (int e = 0; e < BLOCK_SIZE; ++e)\n Cs[row][col] += As[row][e] * Bs[e][col];\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.hsa.c/tiling-2.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "ual C from Cs\n// - - - - - - - - - - - - - - - - - - - -\n// REPLACE NEXT THREE LINES WITH A BARRIER\nfor (int row=0 ; row < BLOCK_SIZE ; row++) {\n for (int col=0 ; col < BLOCK_SIZE ; col++) {\n// END BARRIER\n// - - - - - - - - - - - - - - - - - - - -\n C_row = C_row_start + row;\n C_col = C_col_start + col;\n\t if ((C_row < M) && (C_col < N)) {\n\t\t C[(C_row*LDC)+C_col] = alpha*Cs[row][col] + beta*C[(C_row*LDC)+C_col];\n\t }\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr49897-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) firstprivate(x) lastprivate(x, y)", "context_chars": 100, "text": "897 */\n/* { dg-do run } */\n\nextern void abort (void);\n\nint\nmain ()\n{\n int i, j, x = 0, y, sum = 0;\nfor (i = 0; i < 10; i++)\n {\n x = i;\n y = 0;\n #pragma omp parallel for reduction(+:sum) firstprivate(y) lastprivate(y)\n for (j = 0; j < 10; j++)\n\t{\n\t y = j;\n\t sum += y;\n\t}\n } #pragma omp parallel for reduction(+:sum) firstprivate(x) lastprivate(x, y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr49897-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) firstprivate(y) lastprivate(y)", "context_chars": 100, "text": "m) firstprivate(x) lastprivate(x, y)\n for (i = 0; i < 10; i++)\n {\n x = i;\n y = 0;\n for (j = 0; j < 10; j++)\n\t{\n\t y = j;\n\t sum += y;\n\t} #pragma omp parallel for reduction(+:sum) firstprivate(y) lastprivate(y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-6.c", "omp_pragma_line": "#pragma omp parallel for reduction (min:f) reduction (max:j)", "context_chars": 100, "text": "*/\n\nextern void abort (void);\nint j;\nfloat f;\n\nint\nmain ()\n{\n j = -10000;\n f = 1024.0;\n int i;\n for (i = 0; i < 4; i++)\n switch (i)\n\t{\n\tcase 0:\n\t if (j < -16) j = -16; break;\n\tcase 1:\n\t if (f > -2.0) f = -2.0; break;\n\tcase 2:\n\t if (j < 8) j = 8; if (f > 9.0) f = 9.0; break;\n\tcase 3:\n\t break;\n\t} #pragma omp parallel for reduction (min:f) reduction (max:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/atomic-5.c", "omp_pragma_line": "#pragma omp parallel for shared (d)", "context_chars": 100, "text": "oid abort (void);\n\nint __attribute__((noinline))\ndo_test (void)\n{\n long double d = .0L;\n int i;\n for (i = 0; i < 10; i++)\n #pragma omp atomic\n\td += 1.0L;\n if (d != 10.0L)\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n#ifdef __x86_64__\n unsigned int eax, ebx, ecx, edx;\n\n if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))\n return 0;\n\n if (!(ecx & bit_CMPXCHG16B))\n return 0;\n\n\n do_test ();\n\n return 0;\n} #pragma omp parallel for shared (d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/collapse-3.c", "omp_pragma_line": "#pragma omp parallel for collapse(4 - 1) schedule(static, 4)", "context_chars": 100, "text": "\n\nint\nmain (void)\n{\n int i2, l = 0;\n int 
a[3][3][3];\n\n memset (a, '\\0', sizeof (a));\n for (int i = 0; i < 2; i++)\n for (int j = 0; j < 2; j++)\n\tfor (int k = 0; k < 2; k++)\n\t a[i][j][k] = i + j * 4 + k * 16;\n #pragma omp parallel\n {\n #pragma omp for collapse(2) reduction(|:l)\n\tfor (i2 = 0; i2 < 2; i2++)\n\t for (int j = 0; j < 2; j++)\n\t for (int k = 0; k < 2; k++)\n\t if (a[i2][j][k] != i2 + j * 4 + k * 16)\n\t\tl = 1;\n } #pragma omp parallel for collapse(4 - 1) schedule(static, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr30494.c", "omp_pragma_line": "#pragma omp parallel for num_threads (4)", "context_chars": 100, "text": "get_thread_num ();\n check (m, i, v, w);\n }\n return 0;\n}\n\nint\nbar (int n, int m)\n{\n int i;\nfor (i = 0; i < 6; i++)\n {\n int v[n], w[n * 3 + i], j;\n for (j = 0; j < n; j++)\n\tv[j] = j + omp_get_thread_num ();\n for (j = 0; j < n * 3 + i; j++)\n\tw[j] = j + 10 + omp_get_thread_num ();\n check (m, i, v, w);\n } #pragma omp parallel for num_threads (4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/collapse-2.c", "omp_pragma_line": "#pragma omp parallel for num_threads (8) collapse(3) \\", "context_chars": 100, "text": "#include \n\nint\nmain (void)\n{\n int i, j, k, l = 0, f = 0;\n int m1 = 4, m2 = -5, m3 = 17;\n\n schedule(static, 9) reduction(+:l) \\\n\t\t firstprivate(f)\n for (i = -2; i < m1; i++)\n for (j = m2; j < -2; j++)\n\t{\n\t for (k = 13; k < m3; k++)\n\t {\n\t if (omp_get_num_threads () == 8\n\t\t && ((i + 2) * 12 + (j + 5) * 4 + (k - 13)\n\t\t != (omp_get_thread_num () * 9\n\t\t\t + f++)))\n\t\tl++;\n\t }\n\t} #pragma omp parallel for num_threads (8) collapse(3) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-5.c", "omp_pragma_line": "#pragma omp parallel for simd ordered", "context_chars": 100, "text": "target avx_runtime } } */\n\nextern void abort (void);\nint a[1024], b = -1;\n\nint\nmain ()\n{\n int i;\n for (i = 0; i < 1024; i++)\n {\n a[i] = i;\n #pragma omp ordered threads simd\n {\n\tif (b + 1 != i)\n\t abort ();\n\tb = i;\n }\n a[i] += 3;\n } #pragma omp parallel for simd ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l)", "context_chars": 100, "text": "mp for reduction(+: l)\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n 
__builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n 
for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ki + 10; j < ji - 10; j++)\n 
for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", 
"omp_pragma_line": "#pragma omp parallel for reduction(+: l)", "context_chars": 100, "text": "mp for reduction(+: l)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n 
for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if 
(l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "= ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for 
reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l)", "context_chars": 100, "text": " for reduction(+: l)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n 
for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "= jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if 
(l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp 
parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "l; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l)", "context_chars": 100, "text": "or reduction(+: l)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) 
collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n 
for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3)", "context_chars": 100, "text": "i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-35.c", "omp_pragma_line": "#pragma omp parallel for shared (a, b)", "context_chars": 100, "text": "int y, int z, int *a, int *b)\n{\n if (x == 0)\n {\n int i, j;\n for (i = 0; i < 64; i++)\n\tfor (j = 0; j < 32; j++)\n\t foo (3, i, j, a, b);\n }\n else if (x == 1)\n {\n int i, j;\n #pragma omp distribute dist_schedule (static, 1)\n for (i = 0; i < 64; i++)\n\t#pragma omp parallel for shared (a, b)\n\tfor (j = 0; j < 32; j++)\n\t foo (3, i, j, a, b);\n } #pragma omp parallel for shared (a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-35.c", "omp_pragma_line": "#pragma omp parallel for shared (a, b)", "context_chars": 100, "text": " int i, j;\n #pragma omp distribute dist_schedule (static, 1)\n for (i = 0; i < 64; i++)\n\tfor (j = 0; j < 32; j++)\n\t foo (3, i, j, a, b);\n }\n else if (x == 2)\n {\n int j;\n #pragma omp parallel for shared (a, b)\n for (j = 0; j < 32; j++)\n\tfoo (3, y, j, a, b);\n } #pragma omp parallel for shared (a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-35.c", "omp_pragma_line": "#pragma omp parallel for shared (a, b)", "context_chars": 100, "text": "\tfor (j = 0; j < 32; j++)\n\t foo (3, i, j, a, b);\n }\n else if (x == 2)\n {\n int j;\n for (j = 0; j < 32; j++)\n\tfoo (3, y, j, a, b);\n }\n else\n {\n #pragma omp atomic\n b[y] += z;\n #pragma omp atomic\n *a += 1;\n } #pragma omp parallel for shared (a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-9.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\", "context_chars": 100, "text": " int i;\n for (i = 0; i < p7 + 4; i++)\n {\n if (i < p7)\n\tb[i] = -6;\n a[i] = 0;\n }\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) reduction(max:b)\n for (i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "b, c, x);\n fn1 (e, d + x, x);\n #pragma omp target map(to: b, c[:x], d[x:x], e) map(tofrom: s)\n for (i = 0; i < x; i++)\n\ts += b[i] * c[i] + d[x + i] + sizeof (b) - sizeof (c);\n return s;\n}\n\ndouble\nfn3 (int x)\n{\n double s = 0;\n double b[3 * x], c[3 * x], d[3 * x], e[3 * x];\n int i;\n fn1 (b, c, x);\n fn1 (e, d, x);\n #pragma omp target map(tofrom: s)\n #pragma omp parallel for reduction(+:s)\n for (i = 0; i < x; i++)\n\ts += b[i] * c[i] + d[i];\n return s;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": " * x], e[3 * x];\n int i;\n fn1 (b, c, x);\n fn1 (e, d, x);\n #pragma omp target map(tofrom: s)\n for (i = 0; i < x; i++)\n\ts += b[i] * c[i] + d[i];\n return s;\n}\n\ndouble\nfn4 (int x)\n{\n double s = 0;\n double b[3 * x], c[3 * x], d[3 * x], e[3 * x];\n int i;\n fn1 (b, c, x);\n fn1 (e, d + x, x);\n #pragma omp target data map(from: b, c[:x], d[x:x], e)\n {\n #pragma omp target update to(b, c[:x], d[x:x], e)\n #pragma omp target map(c[:x], d[x:x], s)\n\t#pragma omp parallel for reduction(+:s)\n\t for (i = 0; i < x; i++)\n\t {\n\t s += b[i] * c[i] + d[x + i] + sizeof (b) - sizeof (c);\n\t b[i] = i + 0.5;\n\t c[i] = 0.5 - i;\n\t d[x + i] = 0.5 * i;\n\t }\n }\n for (i = 0; i < x; i++)\n if (b[i] != i + 0.5 || c[i] != 0.5 - i || d[x + i] != 0.5 * i)\n abort ();\n return s;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": " #pragma omp target update to(b, c[:x], d[x:x], e)\n #pragma omp target map(c[:x], d[x:x], s)\n\tfor (i = 0; i < x; i++)\n\t {\n\t s += b[i] * c[i] + d[x + i] + sizeof (b) - sizeof (c);\n\t b[i] = i + 0.5;\n\t c[i] = 0.5 - i;\n\t d[x + i] = 0.5 * i;\n\t } #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(runtime)", "context_chars": 100, "text": ": l) schedule(runtime)\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) 
schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n 
__builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) 
collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for 
reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(runtime)", "context_chars": 100, "text": ": l) schedule(runtime)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 
2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n 
__builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) 
schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "= ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(runtime)", "context_chars": 100, "text": "l) schedule(runtime)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "= jll; i < kll; i++)\n for (k = 0; k < 5; k += 
2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 
(void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n 
for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "l; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(runtime)", "context_chars": 100, "text": " schedule(runtime)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort 
();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; 
k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-15.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)", "context_chars": 100, "text": "i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/omp_workshare4.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "ializations */\nfor (i=0; i < N; i++)\n a[i] = b[i] = i * 1.0;\nchunk = CHUNKSIZE;\nfirst_time = 'y';\n\nshared(a,b,c,chunk) \\\n private(i,tid) \\\n schedule(static,chunk) \\\n firstprivate(first_time)\n\n for (i=0; i < N; i++)\n {\n if (first_time == 'y')\n {\n tid = omp_get_thread_num();\n first_time = 'n';\n }\n c[i] = a[i] + b[i];\n printf(\"tid= %d i= %d c[i]= %f\\n\", tid, i, c[i]);\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (0)", "context_chars": 100, "text": "nt;\n\nvoid\ncheck (int x)\n{\n if (cnt++ != x)\n abort ();\n}\n\nint\nmain (void)\n{\n int j;\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (1)", "context_chars": 100, "text": "4) if (0)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (runtime) num_threads (4) if (0)", "context_chars": 100, "text": "4) if (1)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (runtime) num_threads (4) if (0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (runtime) num_threads 
(4) if (1)", "context_chars": 100, "text": "4) if (0)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (runtime) num_threads (4) if (1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (0)", "context_chars": 100, "text": "4) if (1)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (1)", "context_chars": 100, "text": "4) if (0)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (guided) num_threads (4) if (0)", "context_chars": 100, "text": "4) if (1)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (guided) num_threads (4) if (0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (guided) num_threads (4) if (1)", "context_chars": 100, "text": "4) if (0)\n for (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n }\n\n cnt = 0;\nfor (j = 0; j < 1000; j++)\n {\n#pragma omp ordered\n check (j);\n } #pragma omp parallel for ordered schedule (guided) num_threads (4) if (1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-7.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": "t (*x)[3][2], int *y, long w[1][2])\n{\n unsigned long long a[9] = {};\n short b[5] = {};\n int i;\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:1][:2]) reduction(max:b)\n for (i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[0] *= 3;\n if ((i & 31) == 2)\n\ty[1] *= 7;\n if ((i & 63) == 3)\n\ty[2] *= 17;\n z[i / 32] += (i & 3);\n if (i < 4)\n\tz[i] += i;\n a[i / 32] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-13.c", "omp_pragma_line": "#pragma omp parallel for simd linear(k : 3) reduction(+: s) schedule (static, 16)", "context_chars": 100, "text": "{ 
dg-additional-options \"-mavx\" { target avx_runtime } } */\n\nint\nmain ()\n{\n int k = 0, i, s = 0;\n for (i = 0; i < 128; i++)\n {\n k = k + 3;\n s = s + k;\n } #pragma omp parallel for simd linear(k : 3) reduction(+: s) schedule (static, 16)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-8.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\", "context_chars": 100, "text": " A (*x)[3][2], struct A *y, struct D w[1][2])\n{\n struct C a[9] = {};\n short b[5] = {};\n int i;\n reduction(*:y[:3]) reduction(|:a[:4]) \\\n\t\t\t reduction(&:w[0:1][:2]) reduction(maxb:b)\n for (i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "string.h>\n\nint\ntest1 (void)\n{\n short int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 
3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i 
< 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 
3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if 
(buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, 
'\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} 
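Illustrative sketch only, not a dataset record: the loop-5.c entries above all exercise the same OpenMP pattern, a `#pragma omp parallel for` worksharing loop whose iteration variable is a pointer advanced with a non-unit stride, run under different schedule clauses and then verified element by element. The minimal, self-contained C program below mirrors one of those loops (indices 18, 22, ..., 50 set to 7 in a 64-element buffer) under the static, dynamic, and runtime schedules; the buffer size, bounds, and check are taken from the records, while the helper name `check` and the use of `p += 4` in place of `p = 4 + p` are assumptions made for brevity.

/* Illustrative sketch (assumes GCC/libgomp-style semantics; compile with -fopenmp).
   Runs the same pointer-stride worksharing loop under three schedule kinds and
   verifies that every fourth element from buf[18] through buf[50] was set to 7. */
#include <string.h>
#include <stdlib.h>

#define N 64

/* Hypothetical helper, not from the dataset: verify the expected write pattern. */
static void check (int *buf, int val)
{
  int i;
  for (i = 0; i < N; i++)
    if (buf[i] != val * ((i & 3) == 2 && i >= 18 && i < 53))
      abort ();
}

int main (void)
{
  int buf[N], *p;

  /* schedule (static, 3): iterations handed out in fixed chunks of 3. */
  memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
  for (p = &buf[16]; p < &buf[51]; p += 4)
    p[2] = 7;
  check (buf, 7);

  /* schedule (dynamic, 3): chunks of 3 claimed on demand; result is identical. */
  memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
  for (p = &buf[16]; p < &buf[51]; p += 4)
    p[2] = 7;
  check (buf, 7);

  /* schedule (runtime): schedule kind taken from OMP_SCHEDULE at run time. */
  memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
  for (p = &buf[16]; p < &buf[51]; p += 4)
    p[2] = 7;
  check (buf, 7);

  return 0;
}

Because each iteration writes a distinct element, the result must be the same for every schedule kind; that is exactly what the repeated per-schedule blocks in the records above are testing.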
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i 
& 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort 
();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 
&& i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for 
(i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for 
(p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 
54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; 
i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = 
&buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 
2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; 
i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", 
"omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 
100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) 
== 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = 
&buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for 
schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; p < &buf[54]; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; p <= &buf[63]; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) 
== 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p < &buf[51]; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n 
abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; p > &buf[9]; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; p >= &buf[3]; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; p > &buf[15]; p = -4 + p)\n 
p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; p > &buf[15]; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-6.c", "omp_pragma_line": "#pragma omp parallel for simd aligned(a : 32) reduction(+:s) \\", "context_chars": 100, "text": "bute__((noinline, noclone)) int\nfoo (void)\n{\n int i, u = 0;\n struct S s, t;\n s.s = 0; t.s = 0;\n reduction(foo:t, u)\n for (i = 0; i < 1024; i++)\n {\n int x = a[i];\n s.s += x;\n t.s += x;\n u += x;\n } #pragma omp parallel for simd aligned(a : 32) reduction(+:s) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-10.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\", "context_chars": 100, "text": "int i;\n for (i = 0; i < p7 + 4; i++)\n {\n if (i < p7)\n\tb[i] = -6;\n a[i].t = 0;\n }\n reduction(*:y[:p4]) reduction(|:a[:p5]) \\\n\t\t\t reduction(&:w[0:p6 - 1][:p6]) 
reduction(maxb:b)\n for (i = 0; i < 128; i++)\n {\n x[i / 64][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[0].t *= 3;\n if ((i & 31) == 2)\n\ty[1].t *= 7;\n if ((i & 63) == 3)\n\ty[2].t *= 17;\n z[i / 32].t += (i & 3);\n if (i < 4)\n\tz[i].t += i;\n a[i / 32].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 79) > b[0])\n\tb[0] = i % 79;\n if ((i % 13) > b[1])\n\tb[1] = i % 13;\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-16.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ");\n}\n\nvoid\nbar (int n)\n{\n int a[n], i, err;\n #pragma omp target private (a) map(from:err)\n {\n for (i = 0; i < n; i++)\n a[i] = 7 * i;\n err = 0;\n #pragma omp parallel for reduction(|:err)\n for (i = 0; i < n; i++)\n if (a[i] != 7 * i)\n\terr |= 1;\n }\n if (err)\n abort ();\n}\n\nint\nmain ()\n{\n foo (7);\n bar (7);\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-16.c", "omp_pragma_line": "#pragma omp parallel for reduction(|:err)", "context_chars": 100, "text": ")\n {\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n a[i] = 7 * i;\n err = 0;\n for (i = 0; i < n; i++)\n if (a[i] != 7 * i)\n\terr |= 1;\n }\n if (err)\n abort ();\n}\n\nint\nmain ()\n{\n foo (7);\n bar (7);\n return 0;\n} #pragma omp parallel for reduction(|:err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4)", "context_chars": 100, "text": "int a[256];\n\n__attribute__((noinline, noclone)) int\nf1 (int i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1)", "context_chars": 100, "text": "= 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf2 (short int i, char k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k)", "context_chars": 100, "text": "rn i;\n}\n\n__attribute__((noinline, noclone)) long long int\nf3 (long long int i, long long int k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(static, 3)", "context_chars": 100, "text": " a[i] = j;\n i += 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf4 (int i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": 
"#pragma omp parallel for linear (i: k + 1) schedule(static, 5)", "context_chars": 100, "text": "= 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf5 (short int i, char k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(static, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(static, 7)", "context_chars": 100, "text": "rn i;\n}\n\n__attribute__((noinline, noclone)) long long int\nf6 (long long int i, long long int k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(static, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(dynamic, 3)", "context_chars": 100, "text": " a[i] = j;\n i += 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf7 (int i)\n{\n for (int j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)", "context_chars": 100, "text": "= 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf8 (short int i, char k)\n{\n for (long j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(dynamic, 7)", "context_chars": 100, "text": "rn i;\n}\n\n__attribute__((noinline, noclone)) long long int\nf9 (long long int i, long long int k)\n{\n for (short j = 16; j < 64; j++)\n {\n a[i] = j;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(dynamic, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4)", "context_chars": 100, "text": " i += 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf10 (int i, long step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1)", "context_chars": 100, "text": " return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf11 (short int i, char k, char step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k)", "context_chars": 100, "text": "attribute__((noinline, noclone)) long long int\nf12 (long long int i, long long int k, int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(static, 3)", "context_chars": 100, "text": " 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf13 (int i, long long int step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(static, 5)", "context_chars": 100, "text": " return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf14 (short int i, char k, int step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(static, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(static, 7)", "context_chars": 100, "text": "bute__((noinline, noclone)) long long int\nf15 (long long int i, long long int k, long int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(static, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: 4) schedule(dynamic, 3)", "context_chars": 100, "text": " 4;\n }\n return i;\n}\n\n__attribute__((noinline, noclone)) int\nf16 (int i, long long int step)\n{\n for (int j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: 4) schedule(dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)", "context_chars": 100, "text": " return i;\n}\n\n__attribute__((noinline, noclone)) short int\nf17 (short int i, char k, int step)\n{\n for (long j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k + 1) schedule(dynamic, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i: k) schedule(dynamic, 7)", "context_chars": 100, "text": "bute__((noinline, noclone)) long long int\nf18 (long long int i, long long int k, long int step)\n{\n for (short j = 16; j < 112; j += step)\n {\n a[i] = j / 2 + 8;\n i += 4;\n } #pragma omp parallel for linear (i: k) schedule(dynamic, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr35130.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int a[4], k;\n void nested (int x)\n {\n a[x] = 42;\n }\n\n for (k = 0; k < 4; k++)\n a[k] = 0;\nfor (k = 0; k < 4; k++)\n nested (k);\n\n if (a[0] != 42 || a[1] != 42 || a[2] != 42 || a[3] != 42)\n abort ();\n}\n\nvoid\nf2 (void)\n{\n int a[4], k;\n void nested (void)\n {\n int l;\n void nested2 (int x)\n {\n a[x] = 42;\n }\n#pragma omp parallel for\n for (l = 0; l < 4; l++)\n nested2 (l);\n }\n\n for (k = 0; k < 4; k++)\n a[k] = 0;\n\n nested ();\n\n if 
(a[0] != 42 || a[1] != 42 || a[2] != 42 || a[3] != 42)\n abort ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr35130.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " a[4], k;\n void nested (void)\n {\n int l;\n void nested2 (int x)\n {\n a[x] = 42;\n }\nfor (l = 0; l < 4; l++)\n nested2 (l);\n }\n\n for (k = 0; k < 4; k++)\n a[k] = 0;\n\n nested ();\n\n if (a[0] != 42 || a[1] != 42 || a[2] != 42 || a[3] != 42)\n abort ();\n}\n\nvoid\nf3 (void)\n{\n int a[4], b[4], c[4], k;\n void nested (int x)\n {\n a[x] = b[x] = c[x] = 42;\n }\n\n for (k = 0; k < 4; k++)\n a[k] = b[k] = c[k] = 0;\n nested (0);\n\n#pragma omp parallel\n {\n #pragma omp single\n {\n a[1] = 43;\n b[1] = 43;\n }\n #pragma omp parallel\n {\n #pragma omp single\n {\n\tb[2] = 44;\n\tc[2] = 44;\n }\n }\n }\n\n if (a[0] != 42 || a[1] != 43 || a[2] != 0 || a[3] != 0)\n abort ();\n if (b[0] != 42 || b[1] != 43 || b[2] != 44 || b[3] != 0)\n abort ();\n if (c[0] != 42 || c[1] != 0 || c[2] != 44 || c[3] != 0)\n abort ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr26943-2.c", "omp_pragma_line": "#pragma omp parallel for shared (a, e) firstprivate (b, f) \\", "context_chars": 100, "text": "0] = \"a\", f[10] = \"b\", g[10] = \"c\", h[10] = \"d\";\n\nint\nmain (void)\n{\n int i;\n omp_set_dynamic (0);\nlastprivate (c, g) private (d, h) \\\n\t\t\t schedule (static, 1) num_threads (4) \\\n\t\t\t reduction (+:j)\n for (i = 0; i < 4; i++)\n {\n if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')\n\tj++;\n GOMP_barrier ();\n#pragma omp atomic\n a += i;\n b += i;\n c = i;\n d = i;\n#pragma omp atomic\n e[0] += i;\n f[0] += i;\n g[0] = 'g' + i;\n h[0] = 'h' + i;\n GOMP_barrier ();\n if (a != 8 + 6 || b != 12 + i || c != i || d != i)\n\tj += 8;\n if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)\n\tj += 64;\n if (h[0] != 'h' + i)\n\tj += 512;\n } #pragma omp parallel for shared (a, e) firstprivate (b, f) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (dynamic)", "context_chars": 100, "text": "j2 + 1))\n abort ();\n}\n\nvoid\ntest9 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (dynamic)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest10 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (guided)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest11 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for 
reduction (+:e,c) schedule (guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (guided)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest12 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest13 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest14 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest15 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (guided) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-1.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest16 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (guided) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/omp-nested-1.c", "omp_pragma_line": "#pragma omp parallel for num_threads(2) shared (i)", "context_chars": 100, "text": "// { dg-do run }\n\nextern void abort(void);\n#define N 1000\n\nint foo()\n{\n int i = 0, j;\n\n for (j = 0; j < N; ++j)\n {\n #pragma omp parallel num_threads(1) shared (i)\n {\n\t#pragma omp atomic\n\ti++;\n }\n } #pragma omp parallel for num_threads(2) shared (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr26943-1.c", "omp_pragma_line": "#pragma omp parallel for num_threads (16) firstprivate (n) lastprivate (n) \\", "context_chars": 100, "text": "rn void omp_set_dynamic (int);\nint n = 6;\n\nint\nmain (void)\n{\n int i, x = 0;\n omp_set_dynamic (0);\nschedule (static, 1) reduction (+: x)\n for (i = 0; i < 16; i++)\n {\n if 
(n != 6)\n\t++x;\n n = i;\n } #pragma omp parallel for num_threads (16) firstprivate (n) lastprivate (n) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-16.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " j + 2 * 8 * i);\n\tb = b + 2;\n }\n if (c || b != 7 + 64 * 2)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n b = b + 2;\n } #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-16.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": "= 7 + 2 * i);\n b = b + 2;\n }\n if (c || b != 7 + 64 * 2)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n b = b + 3;\n } #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-16.c", "omp_pragma_line": "#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": "+ i / 4 * 3);\n b = b + 3;\n }\n if (c || b != 7 + 16 * 3)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 8; i++)\n for (int j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tb = b + 2;\n } #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr58392.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: c)", "context_chars": 100, "text": "id);\nint d[32 * 32];\n\n__attribute__((noinline, noclone)) int\nfoo (int a, int b)\n{\n int j, c = 0;\n for (j = 0; j < a; j += 32)\n {\n\tint l;\n\t#pragma omp simd reduction(+: c)\n\t for (l = 0; l < b; ++l)\n\t c += d[j + l];\n } #pragma omp parallel for reduction(+: c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr58392.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(+: c)", "context_chars": 100, "text": "+ l];\n }\n return c;\n}\n\n__attribute__((noinline, noclone)) int\nbar (int a)\n{\n int j, c = 0;\n for (j = 0; j < a; ++j)\n c += d[j];\n return c;\n}\n\n__attribute__((noinline)) static int\nbaz (int a)\n{\n int j, c = 0;\n #pragma omp simd reduction(+: c)\n for (j = 0; j < a; ++j)\n c += d[j];\n return c;\n} #pragma omp parallel for simd reduction(+: c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr26943-3.c", "omp_pragma_line": "#pragma omp parallel for shared (a, e) firstprivate (b, f) \\", "context_chars": 100, "text": "d (1);\n#pragma omp parallel num_threads (2) reduction (+:l)\n if (k == omp_get_thread_num ())\n {\nlastprivate (c, g) private (d, h) \\\n\t\t\t schedule (static, 1) num_threads (4) \\\n\t\t\t reduction (+:j)\n for (i = 0; i < 4; i++)\n\t{\n\t if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')\n\t j++;\n\t GOMP_barrier ();\n#pragma omp atomic\n\t a += i;\n\t b += i;\n\t c = i;\n\t d = i;\n#pragma omp atomic\n\t e[0] += i;\n\t f[0] += i;\n\t g[0] = 'g' + i;\n\t h[0] = 'h' + i;\n\t GOMP_barrier ();\n\t if 
(a != 8 + 6 || b != 12 + i || c != i || d != i)\n\t j += 8;\n\t if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)\n\t j += 64;\n\t if (h[0] != 'h' + i)\n\t j += 512;\n\t} #pragma omp parallel for shared (a, e) firstprivate (b, f) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2], z[t + 2:p3]) \\", "context_chars": 100, "text": "int i;\n for (i = 0; i < p7 + 4; i++)\n {\n if (i < p7)\n\tb[i] = -6;\n a[i].t = 0;\n }\n reduction(*:y[-s:p4]) reduction(|:a[s + 3:p5]) \\\n\t\t\t reduction(&:w[s + 1:p6 - 1][t:p6]) reduction(maxb:b[2:])\n for (i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[1].t *= 3;\n if ((i & 31) == 2)\n\ty[2].t *= 7;\n if ((i & 63) == 3)\n\ty[3].t *= 17;\n z[i / 32 + 2].t += (i & 3);\n if (i < 4)\n\tz[i + 2].t += i;\n a[i / 32 + 2].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2], z[t + 2:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(static, 2)", "context_chars": 100, "text": "l) schedule(static, 2)\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ji; i < ki; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 
2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n 
__builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": " ji; i < ki; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ji; i < ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n 
unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "ki; i++)\n for (k = ki + 10; k < ji - 10; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ki + 10; j < ji - 10; j++)\n for (i = ji; i < ki; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf1 (void)\n{\n unsigned int i, j, k;\n int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 
2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(static, 2)", "context_chars": 100, "text": "l) schedule(static, 2)\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = ju; i < ku; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for 
reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; 
j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": " ju; i < ku; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 
2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "= ju; i < ku; i++)\n for (k = ku; k < ju; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = ku; j < ju; j++)\n for (i = ju; i < ku; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf2 (void)\n{\n long long int i, j, k;\n unsigned long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(static, 2)", "context_chars": 100, "text": " schedule(static, 2)\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jll; i < kll; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; 
j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "= jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) 
schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; 
j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "ll; i < kll; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jll; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "l; i < kll; i++)\n for (k = kll; k < jll; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 
kll; j < jll; j++)\n for (i = jll; i < kll; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nvoid\nf3 (void)\n{\n unsigned long long int i, j, k;\n long long int j2, k2;\n #pragma omp for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) schedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) schedule(static, 2)", "context_chars": 100, "text": "chedule(static, 2)\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n for (i = jull; i < kull; i++)\n l++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: 
l) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j2 = 0; j2 < 4; j2++)\n for (i = jull; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "l; i < kull; i++)\n for (k2 = 0; k2 < 5; k2 += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = 0; j < 4; j++)\n for (i = jull; i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-14.c", "omp_pragma_line": "#pragma omp 
parallel for reduction(+: l) collapse(3) schedule(static, 2)", "context_chars": 100, "text": "i < kull; i++)\n for (k = kull; k < jull; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n for (j = kull; j < jull; j++)\n for (i = jull; i < kull; i++)\n for (k = 0; k < 5; k += 2)\n\tl++;\n if (l != 0)\n __builtin_abort ();\n}\n\nint\nmain ()\n{\n f0 ();\n f1 ();\n f2 ();\n f3 ();\n return 0;\n} #pragma omp parallel for reduction(+: l) collapse(3) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-1.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "te(x)\n\t#pragma omp distribute dist_schedule(static, 4) collapse(1)\n\t for (j=0; j < x; j += y)\n\t for (i = j; i < j + y; i++)\n\t\ttgt (), s += b[i] * c[i];\n #pragma omp target update from(b, tgtv)\n }\n return s;\n}\n\ndouble\nfn3 (int x)\n{\n double b[1024], c[1024], s = 0;\n int i;\n fn1 (b, c, x);\n #pragma omp target map(to: b, c) map(tofrom:s)\n #pragma omp parallel for reduction(+:s)\n for (i = 0; i < x; i++)\n\ttgt (), s += b[i] * c[i];\n return s;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-1.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "24], c[1024], s = 0;\n int i;\n fn1 (b, c, x);\n #pragma omp target map(to: b, c) map(tofrom:s)\n for (i = 0; i < x; i++)\n\ttgt (), s += b[i] * c[i];\n return s;\n}\n\ndouble\nfn4 (int x, double *p)\n{\n double b[1024], c[1024], d[1024], s = 0;\n int i;\n fn1 (b, c, x);\n fn1 (d + x, p + x, x);\n #pragma omp target map(to: b, c[0:x], d[x:x]) map(to:p[x:64 + (x & 31)]) \\\n\t\t map(tofrom: s)\n #pragma omp parallel for reduction(+:s)\n for (i = 0; i < x; i++)\n\ts += b[i] * c[i] + d[x + i] + p[x + i];\n return s;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/target-1.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "pragma omp target map(to: b, c[0:x], d[x:x]) map(to:p[x:64 + (x & 31)]) \\\n\t\t map(tofrom: s)\n for (i = 0; i < x; i++)\n\ts += b[i] * c[i] + d[x + i] + p[x + i];\n return s;\n}\n\nint\nmain ()\n{\n double a = fn2 (128, 4, 6);\n int b = tgtv;\n double c = fn3 (61);\n #pragma omp target update from(tgtv)\n int d = tgtv;\n double e[1024];\n double f = fn4 (64, e);\n if (a != 13888.0 || b != 6 + 128 || c != 4062.0 || d != 6 + 128 + 61\n || f != 8032.0)\n abort ();\n return 0;\n} #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/nonmonotonic-1.c", "omp_pragma_line": "#pragma omp parallel for schedule(nonmonotonic: dynamic)", "context_chars": 100, "text": "C_TYPE int\n#define NONMONOTONIC_END(n) n\n#endif\n\nint a[73];\n\nint\nmain ()\n{\n NONMONOTONIC_TYPE i;\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: dynamic, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: guided, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel\n {\n int cnt = omp_get_num_threads 
();\n int thr = omp_get_thread_num ();\n if (thr < 73)\n a[thr]++;\n #pragma omp barrier\n #pragma omp for schedule(nonmonotonic: dynamic)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: dynamic, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp single private (i)\n for (i = 0; i < 73; i++)\n if (a[i] != 8 + (i < cnt))\n\tabort ();\n } #pragma omp parallel for schedule(nonmonotonic: dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/nonmonotonic-1.c", "omp_pragma_line": "#pragma omp parallel for schedule(nonmonotonic: dynamic, 5)", "context_chars": 100, "text": "lel for schedule(nonmonotonic: dynamic)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: guided, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel\n {\n int cnt = omp_get_num_threads ();\n int thr = omp_get_thread_num ();\n if (thr < 73)\n a[thr]++;\n #pragma omp barrier\n #pragma omp for schedule(nonmonotonic: dynamic)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: dynamic, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp single private (i)\n for (i = 0; i < 73; i++)\n if (a[i] != 8 + (i < cnt))\n\tabort ();\n } #pragma omp parallel for schedule(nonmonotonic: dynamic, 5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/nonmonotonic-1.c", "omp_pragma_line": "#pragma omp parallel for schedule(nonmonotonic: guided)", "context_chars": 100, "text": " for schedule(nonmonotonic: dynamic, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel for schedule(nonmonotonic: guided, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel\n {\n int cnt = omp_get_num_threads ();\n int thr = omp_get_thread_num ();\n if (thr < 73)\n a[thr]++;\n #pragma omp barrier\n #pragma omp for schedule(nonmonotonic: dynamic)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: dynamic, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp single private (i)\n for (i = 0; i < 73; i++)\n if (a[i] != 8 + (i < cnt))\n\tabort ();\n } #pragma omp parallel for schedule(nonmonotonic: guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/nonmonotonic-1.c", "omp_pragma_line": "#pragma omp parallel for schedule(nonmonotonic: guided, 7)", "context_chars": 100, "text": 
"llel for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp parallel\n {\n int cnt = omp_get_num_threads ();\n int thr = omp_get_thread_num ();\n if (thr < 73)\n a[thr]++;\n #pragma omp barrier\n #pragma omp for schedule(nonmonotonic: dynamic)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: dynamic, 7)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp for schedule(nonmonotonic: guided, 5)\n for (i = 0; i < NONMONOTONIC_END (73); i++)\n a[i]++;\n #pragma omp single private (i)\n for (i = 0; i < 73; i++)\n if (a[i] != 8 + (i < cnt))\n\tabort ();\n } #pragma omp parallel for schedule(nonmonotonic: guided, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " }\n if (c || i != 8 || j != 8 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n b = b + 2;\n } #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": " + 2;\n }\n if (c || i != 64 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n b = b + 3;\n } #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " + 3;\n }\n if (c || i != 64 || b != 7 + 16 * 3)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n b = b + 2;\n } #pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": " + 2;\n }\n if (c || i != 64 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n b = b + 3;\n } #pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " + 3;\n }\n if (c || i != 64 || b != 7 + 16 * 3)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 8; i++)\n for (j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tb = b + 2;\n } #pragma omp parallel for simd lastprivate (i, j) collapse (2) 
schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-14.c", "omp_pragma_line": "#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " }\n if (c || i != 8 || j != 8 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 8; i++)\n for (j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tb = b + 2;\n } #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr52547.c", "omp_pragma_line": "#pragma omp parallel for reduction(|:e)", "context_chars": 100, "text": "\n{\n return fn (x);\n}\n\n__attribute__((noinline, noclone)) int\nfoo (int x, int *y)\n{\n int i, e = 0;\nfor (i = 0; i < x; ++i)\n {\n __label__ lab;\n int bar (int *z) { return z - y; }\n if (baz (&y[i], bar) != i)\n\te |= 1;\n } #pragma omp parallel for reduction(|:e)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "string.h>\n\nint\ntest1 (void)\n{\n short int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, 
'\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] 
= 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset 
(buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 
3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 
64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", 
"omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = 
&buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest2 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp 
parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 
3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', 
sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 
3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = 
&buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof 
(buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (static, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && 
i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (static, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest3 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule 
(dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp 
parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 
6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", 
"context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; 
i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n 
p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (dynamic, 3)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof 
(buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, 3)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort 
();\n return 0;\n} #pragma omp parallel for schedule (dynamic, 3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": ";\n return 0;\n}\n\nint\ntest4 (void)\n{\n int buf[64], *p;\n int i;\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[10]; &buf[54] > p; p++)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[3]; &buf[63] >= p; p += 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n 
for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[51] > p; p = 4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && 
i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[53]; &buf[9] < p; --p)\n *p = 5;\n for (i = 0; i < 64; i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "i++)\n if (buf[i] != 5 * (i >= 10 && i < 54))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[63]; &buf[3] <= p; p -= 2)\n p[-2] = 6;\n for (i = 0; i < 64; i++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for 
schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "++)\n if (buf[i] != 6 * ((i & 1) && i <= 61))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[48]; &buf[15] < p; p = -4 + p)\n p[2] = 7;\n for (i = 0; i < 64; i++)\n if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\n#pragma omp parallel for schedule (runtime)\n for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/loop-11.c", "omp_pragma_line": "#pragma omp parallel for schedule (runtime)", "context_chars": 100, "text": "[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))\n abort ();\n memset (buf, '\\0', sizeof (buf));\nfor (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)\n p[2] = -7;\n for (i = 0; i < 64; i++)\n if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))\n abort ();\n return 0;\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n omp_set_schedule (omp_sched_static, 0);\n test4 ();\n omp_set_schedule (omp_sched_static, 3);\n test4 ();\n omp_set_schedule (omp_sched_dynamic, 5);\n test4 ();\n omp_set_schedule (omp_sched_guided, 2);\n test4 ();\n return 0;\n} #pragma omp parallel for schedule (runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-12.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\", "context_chars": 100, "text": " struct A *y, struct D w[1][2], int s, int t)\n{\n struct C a[9] = {};\n short b[5] = {};\n int i;\n reduction(*:y[-s:3]) reduction(|:a[s + 3:4]) \\\n\t\t\t reduction(&:w[s + 1:1][t:2]) reduction(maxb:b[2:])\n for (i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1].t += i;\n if ((i & 15) == 1)\n\ty[1].t *= 3;\n if ((i & 31) == 2)\n\ty[2].t *= 7;\n if ((i & 63) == 3)\n\ty[3].t *= 17;\n z[i / 32 + 2].t += (i & 3);\n if (i < 4)\n\tz[i + 2].t += i;\n a[i / 32 + 2].t |= 1ULL << (i & 30);\n w[0][i & 1].t &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", 
"omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \"-std=gnu99\" } */\n\nextern void abort (void);\n\nint n = 20;\n\nint\nmain (void)\n{\n int a[n], b[n][n];\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\n#pragma omp parallel for\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int a[n], b[n][n];\n\n#pragma omp parallel for\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\nfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\n for (int i = 0; i < n; i++)\n {\n for (int j = 0; j < n; j++)\n\tif (b[i][j] != i + 1)\n\t abort ();\n if (a[i] != i + 1)\n\tabort ();\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for shared (n, a, b)", "context_chars": 100, "text": "t j = 0; j < n; j++)\n\tif (b[i][j] != i + 1)\n\t abort ();\n if (a[i] != i + 1)\n\tabort ();\n }\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 3;\n#pragma omp parallel for\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for shared (n, a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n\n#pragma omp parallel for shared (n, a, b)\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 3;\nfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\n for (int i = 0; i < n; i++)\n {\n for (int j = 0; j < n; j++)\n\tif (b[i][j] != i + 3)\n\t abort ();\n if (a[i] != i + 3)\n\tabort ();\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t j = 0; j < n; j++)\n\tif (b[i][j] != i + 3)\n\t abort ();\n if (a[i] != i + 3)\n\tabort ();\n }\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 5;\n#pragma omp parallel for shared (n, a, b)\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for shared (n, a, b)", "context_chars": 100, "text": ")\n\tabort ();\n }\n\n#pragma omp parallel for\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 5;\nfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\n for (int i = 0; i < n; i++)\n {\n for (int j = 0; j < n; j++)\n\tif (b[i][j] != i + 5)\n\t abort ();\n if (a[i] != i + 5)\n\tabort ();\n } #pragma omp parallel for shared (n, a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for shared (n, a, b)", "context_chars": 100, "text": "t j = 0; j < n; j++)\n\tif (b[i][j] != i + 5)\n\t abort ();\n if (a[i] != i + 5)\n\tabort ();\n }\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 7;\n#pragma omp parallel for shared (n, a, b)\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for shared (n, a, b)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for shared (n, a, b)", "context_chars": 100, "text": "}\n\n#pragma omp parallel for shared (n, a, b)\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 7;\nfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\n for (int i = 0; i < n; i++)\n {\n for (int j = 0; j < n; j++)\n\tif (b[i][j] != i + 7)\n\t abort ();\n if (a[i] != i + 7)\n\tabort ();\n } #pragma omp parallel for shared (n, a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for private (a, b)", "context_chars": 100, "text": "t j = 0; j < n; j++)\n\tif (b[i][j] != i + 7)\n\t abort ();\n if (a[i] != i + 7)\n\tabort ();\n }\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\n#pragma omp parallel for\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for private (a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n\n#pragma omp parallel for private (a, b)\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\nfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\n#pragma omp parallel for private (a, b)\n for (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\n#pragma omp parallel for private (b)\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr39154.c", "omp_pragma_line": "#pragma omp parallel for private (a, b)", "context_chars": 100, "text": " {\n\ta[i] = i + 1;\n#pragma omp parallel for\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n }\n\nfor (int i = 0; i < n; i++)\n {\n\ta[i] = i + 1;\n#pragma omp parallel for private (b)\n\tfor (int j = 0; j < n; j++)\n\t b[i][j] = a[i];\n } #pragma omp parallel for private (a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/collapse-1.c", "omp_pragma_line": "#pragma omp parallel for collapse(4 - 1) schedule(static, 4)", "context_chars": 100, "text": "ib.h>\n\nint\nmain (void)\n{\n int i, j, k, l = 0;\n int a[3][3][3];\n\n memset (a, '\\0', sizeof (a));\n for (i = 0; i < 2; i++)\n for (j = 0; j < 2; j++)\n\tfor (k = 0; k < 2; k++)\n\t a[i][j][k] = i + j * 4 + k * 16;\n #pragma omp parallel\n {\n #pragma omp for collapse(2) reduction(|:l) private(k)\n\tfor (i = 0; i < 2; i++)\n\t for (j = 0; j < 2; j++)\n\t for (k = 0; k < 2; k++)\n\t if (a[i][j][k] != i + j * 4 + k * 16)\n\t\tl = 1;\n } #pragma omp parallel for collapse(4 - 1) schedule(static, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " }\n if (c || i != 8 || j != 8 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n foo (&b, &i, &j, 2);\n } #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp 
parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": ", 2);\n }\n if (c || i != 64 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n foo (&b, &i, &j, 3);\n } #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)", "context_chars": 100, "text": ", 3);\n }\n if (c || i != 64 || b != 7 + 16 * 3)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n foo (&b, &i, &j, 2);\n } #pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": ", 2);\n }\n if (c || i != 64 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n foo (&b, &i, &j, 3);\n } #pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": ", 3);\n }\n if (c || i != 64 || b != 7 + 16 * 3)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 8; i++)\n for (j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tfoo (&b, &i, &j, 2);\n } #pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-15.c", "omp_pragma_line": "#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " }\n if (c || i != 8 || j != 8 || b != 7 + 64 * 2)\n __builtin_abort ();\n i = 4; j = 4; b = 7;\n for (i = 0; i < 8; i++)\n for (j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tfoo (&b, &i, &j, 2);\n } #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr26943-4.c", "omp_pragma_line": "#pragma omp parallel for shared (a, e) firstprivate (b, f) \\", "context_chars": 100, "text": "ion (+:l) \\\n\t\t firstprivate (a, b, c, d, e, f, g, h, j)\n if (k == omp_get_thread_num ())\n {\nlastprivate (c, g) private (d, h) \\\n\t\t\t schedule (static, 1) num_threads (4) \\\n\t\t\t reduction (+:j)\n for (i = 0; i < 4; i++)\n\t{\n\t if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')\n\t j++;\n\t GOMP_barrier ();\n#pragma omp atomic\n\t a += i;\n\t b += i;\n\t c = i;\n\t d = i;\n#pragma omp atomic\n\t e[0] += i;\n\t f[0] += i;\n\t g[0] = 'g' + i;\n\t h[0] = 'h' + i;\n\t GOMP_barrier ();\n\t if (a != 8 + 6 || b != 12 + i || c != i || d != i)\n\t j += 8;\n\t if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)\n\t j += 64;\n\t 
if (h[0] != 'h' + i)\n\t j += 512;\n\t} #pragma omp parallel for shared (a, e) firstprivate (b, f) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for num_threads(4)", "context_chars": 100, "text": ");\n\nvoid\ntest1 (void)\n{\n int i = 0, j = 0;\n void bar (void)\n {\n i++;\n j++;\n }\n bar ();\n for (i = 0; i < 100; i++)\n #pragma omp atomic\n\tj += 1;\n if (j != 101)\n abort ();\n #pragma omp parallel for lastprivate(i) num_threads(2)\n for (i = 0; i < 100; i++)\n #pragma omp atomic\n\tj += 1;\n if (i != 100)\n abort ();\n i = 3;\n bar ();\n if (j != 202)\n abort ();\n if (i != 4)\n abort ();\n}\n\nvoid\ntest2 (void)\n{\n int i = -1, j = 99, k, l = 9, m = 0;\n void bar (void)\n {\n i++;\n j++;\n l++;\n m++;\n }\n bar ();\n #pragma omp parallel for num_threads(4)\n for (k = i; k < j; k += l)\n #pragma omp atomic\n\tm += 1;\n bar ();\n if (i != 1 || j != 101 || l != 11 || m != 12)\n abort ();\n} #pragma omp parallel for num_threads(4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for lastprivate(i) num_threads(2)", "context_chars": 100, "text": "(4)\n for (i = 0; i < 100; i++)\n #pragma omp atomic\n\tj += 1;\n if (j != 101)\n abort ();\n for (i = 0; i < 100; i++)\n #pragma omp atomic\n\tj += 1;\n if (i != 100)\n abort ();\n i = 3;\n bar ();\n if (j != 202)\n abort ();\n if (i != 4)\n abort ();\n}\n\nvoid\ntest2 (void)\n{\n int i = -1, j = 99, k, l = 9, m = 0;\n void bar (void)\n {\n i++;\n j++;\n l++;\n m++;\n }\n bar ();\n #pragma omp parallel for num_threads(4)\n for (k = i; k < j; k += l)\n #pragma omp atomic\n\tm += 1;\n bar ();\n if (i != 1 || j != 101 || l != 11 || m != 12)\n abort ();\n} #pragma omp parallel for lastprivate(i) num_threads(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for num_threads(4)", "context_chars": 100, "text": " j = 99, k, l = 9, m = 0;\n void bar (void)\n {\n i++;\n j++;\n l++;\n m++;\n }\n bar ();\n for (k = i; k < j; k += l)\n #pragma omp atomic\n\tm += 1;\n bar ();\n if (i != 1 || j != 101 || l != 11 || m != 12)\n abort ();\n}\n\nvoid\ntest3 (void)\n{\n int i, j, k, l, m;\n void bar (void)\n {\n #pragma omp parallel for num_threads(4)\n for (i = j; i < k; i += l)\n #pragma omp atomic\n m += 1;\n }\n void baz (void)\n {\n #pragma omp parallel for num_threads(2) lastprivate(i)\n for (i = j; i < k * 2; i += l / 2)\n #pragma omp atomic\n m += 1;\n }\n i = 7;\n j = 0;\n k = 100;\n l = 2;\n m = 0;\n bar ();\n if (j != 0 || k != 100 || l != 2 || m != 50)\n abort ();\n baz ();\n if (i != 200 || j != 0 || k != 100 || l != 2 || m != 250)\n abort ();\n} #pragma omp parallel for num_threads(4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for num_threads(4)", "context_chars": 100, "text": "!= 11 || m != 12)\n abort ();\n}\n\nvoid\ntest3 (void)\n{\n int i, j, k, l, m;\n void bar (void)\n {\n for (i = j; i < k; i += l)\n #pragma omp atomic\n m += 1;\n }\n void baz (void)\n {\n #pragma omp parallel for num_threads(2) lastprivate(i)\n for (i = j; i < k * 2; i += l / 2)\n #pragma omp atomic\n m += 1;\n } #pragma omp parallel for num_threads(4)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for num_threads(2) lastprivate(i)", "context_chars": 100, "text": " for (i = j; i < k; i += l)\n #pragma omp atomic\n m += 1;\n }\n void baz (void)\n {\n for (i = j; i < k * 2; i += l / 2)\n #pragma omp atomic\n m += 1;\n }\n i = 7;\n j = 0;\n k = 100;\n l = 2;\n m = 0;\n bar ();\n if (j != 0 || k != 100 || l != 2 || m != 50)\n abort ();\n baz ();\n if (i != 200 || j != 0 || k != 100 || l != 2 || m != 250)\n abort ();\n}\n\nvoid\ntest4 (void)\n{\n int i, j, k, l, m = 0;\n int foo (void)\n {\n return j;\n }\n int bar (void)\n {\n return k;\n }\n int baz (void)\n {\n return l;\n }\n j = 0;\n k = 1000;\n l = 2;\n #pragma omp parallel for num_threads(8) lastprivate(i)\n for (i = foo (); i < bar (); i += baz ())\n #pragma omp atomic\n m += 1;\n if (i != 1000 || m != 500 || j != 0 || k != 1000 || l != 2)\n abort ();\n} #pragma omp parallel for num_threads(2) lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr33880.c", "omp_pragma_line": "#pragma omp parallel for num_threads(8) lastprivate(i)", "context_chars": 100, "text": "(void)\n {\n return k;\n }\n int baz (void)\n {\n return l;\n }\n j = 0;\n k = 1000;\n l = 2;\n for (i = foo (); i < bar (); i += baz ())\n #pragma omp atomic\n m += 1;\n if (i != 1000 || m != 500 || j != 0 || k != 1000 || l != 2)\n abort ();\n}\n\nint\nmain (void)\n{\n test1 ();\n test2 ();\n test3 ();\n test4 ();\n return 0;\n} #pragma omp parallel for num_threads(8) lastprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-11.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\", "context_chars": 100, "text": "int *y, long w[1][2], int s, int t)\n{\n unsigned long long a[9] = {};\n short b[5] = {};\n int i;\n reduction(*:y[-s:3]) reduction(|:a[s + 3:4]) \\\n\t\t\t reduction(&:w[s + 1:1][t:2]) reduction(max:b[2:])\n for (i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[1] *= 3;\n if ((i & 31) == 2)\n\ty[2] *= 7;\n if ((i & 63) == 3)\n\ty[3] *= 17;\n z[i / 32 + 2] += (i & 3);\n if (i < 4)\n\tz[i + 2] += i;\n a[i / 32 + 2] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:2][:][0:2], z[t + 2:4]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/static-chunk-size-one.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)", "context_chars": 100, "text": "extern void abort ();\n\nint\nbar ()\n{\n int a = 0, i;\n\nfor (i = 0; i < 10; i++)\n a += i;\n\n return a;\n}\n\nint\nmain (void)\n{\n int res;\n res = bar ();\n if (res != 45)\n abort ();\n return 0;\n} #pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr58756.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: c)", "context_chars": 100, "text": "id);\nint d[32 * 32];\n\n__attribute__((noinline, noclone)) int\nfoo (int a, int b)\n{\n int j, c = 0;\n for (j = 0; j < a; j += 32)\n 
{\n\tint l;\n\t#pragma omp simd reduction(+: c) safelen(1)\n\t for (l = 0; l < b; ++l)\n\t c += d[j + l];\n } #pragma omp parallel for reduction(+: c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr58756.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(+: c) safelen(1)", "context_chars": 100, "text": "+ l];\n }\n return c;\n}\n\n__attribute__((noinline, noclone)) int\nbar (int a)\n{\n int j, c = 0;\n for (j = 0; j < a; ++j)\n c += d[j];\n return c;\n}\n\n__attribute__((noinline)) static int\nbaz (int a)\n{\n int j, c = 0;\n #pragma omp simd reduction(+: c) safelen(1)\n for (j = 0; j < a; ++j)\n c += d[j];\n return c;\n} #pragma omp parallel for simd reduction(+: c) safelen(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-17.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": " * i);\n\tfoo (&b, &i, &j, 2);\n }\n if (c || b != 7 + 64 * 2)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 64; i++)\n {\n c = c + (b != 7 + 2 * i);\n foo (&b, &i, &i, 2);\n } #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-17.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)", "context_chars": 100, "text": "i);\n foo (&b, &i, &i, 2);\n }\n if (c || b != 7 + 64 * 2)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 64; i += 4)\n {\n c = c + (b != 7 + i / 4 * 3);\n foo (&b, &i, &i, 3);\n } #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/simd-17.c", "omp_pragma_line": "#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)", "context_chars": 100, "text": "3);\n foo (&b, &i, &i, 3);\n }\n if (c || b != 7 + 16 * 3)\n __builtin_abort ();\n b = 7;\n for (int i = 0; i < 8; i++)\n for (int j = 0; j < 8; j++)\n {\n\tc = c + (b != 7 + 2 * j + 2 * 8 * i);\n\tfoo (&b, &i, &j, 2);\n } #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static)", "context_chars": 100, "text": "j2 + 1))\n abort ();\n}\n\nvoid\ntest9 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest10 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for 
reduction (+:e,c) schedule (static, 1)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest11 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static, 1)", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest12 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest13 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest14 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest15 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = j1; i <= k1; ++i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr29947-2.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered", "context_chars": 100, "text": "2 + 1))\n abort ();\n}\n\nvoid\ntest16 (long j1, long k1, long j2, long k2)\n{\n long i, e = 0, c = 0;\nfor (i = k1; i >= j1; --i)\n {\n if (i < j2 || i > k2)\n\t++e;\n#pragma omp ordered\n ++c;\n } #pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/reduction-13.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2], z[t + 2:p3]) \\", "context_chars": 100, "text": " int i;\n for (i = 0; i < p7 + 4; i++)\n {\n if (i < p7)\n\tb[i] = -6;\n a[i] = 0;\n }\n reduction(*:y[-s:p4]) reduction(|:a[s + 3:p5]) \\\n\t\t\t reduction(&:w[s + 1:p6 - 1][t:p6]) reduction(max:b[2:])\n for (i = 0; i < 128; i++)\n {\n x[i / 64 - 1][i % 3][(i / 4) & 1] += i;\n if ((i & 15) == 1)\n\ty[1] *= 3;\n if ((i & 31) == 2)\n\ty[2] *= 7;\n if ((i & 63) == 3)\n\ty[3] *= 17;\n z[i / 32 + 
2] += (i & 3);\n if (i < 4)\n\tz[i + 2] += i;\n a[i / 32 + 2] |= 1ULL << (i & 30);\n w[0][i & 1] &= ~(1L << (i / 17 * 3));\n if ((i % 23) > b[2])\n\tb[2] = i % 23;\n if ((i % 85) > b[3])\n\tb[3] = i % 85;\n if ((i % 192) > b[4])\n\tb[4] = i % 192;\n } #pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2], z[t + 2:p3]) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/omp_workshare3.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "[N];\n\n/* Some initializations */\nfor (i=0; i < N; i++)\n a[i] = b[i] = i * 1.0;\nchunk = CHUNKSIZE;\n\nshared(a,b,c,chunk) \\\n private(i,tid) \\\n schedule(static,chunk)\n {\t\t\t\t/* { dg-error \"expected\" } */\n tid = omp_get_thread_num();\n for (i=0; i < N; i++)\n {\n c[i] = a[i] + b[i];\n printf(\"tid= %d i= %d c[i]= %f\\n\", tid, i, c[i]);\n }\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-3.c", "omp_pragma_line": "#pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w)", "context_chars": 100, "text": "1024], v[1024], w[1024];\n\n__attribute__((noinline, noclone)) long\nf1 (long a, long b)\n{\n long d;\n for (d = a; d < b; d++)\n u[d] = v[d] + w[d];\n return d;\n}\n\n__attribute__((noinline, noclone)) long\nf2 (long a, long b, long c)\n{\n long d, e;\n #pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w) linear(c:5) lastprivate(e)\n for (d = a; d < b; d++)\n {\n u[d] = v[d] + w[d];\n c += 5;\n e = c;\n }\n return d + c + e;\n} #pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-3.c", "omp_pragma_line": "#pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w) linear(c:5) lastprivate(e)", "context_chars": 100, "text": " return d;\n}\n\n__attribute__((noinline, noclone)) long\nf2 (long a, long b, long c)\n{\n long d, e;\n for (d = a; d < b; d++)\n {\n u[d] = v[d] + w[d];\n c += 5;\n e = c;\n } #pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w) linear(c:5) lastprivate(e)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-3.c", "omp_pragma_line": "#pragma omp parallel for default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)", "context_chars": 100, "text": "\n__attribute__((noinline, noclone)) long\nf3 (long a1, long b1, long a2, long b2)\n{\n long d1, d2;\n for (d1 = a1; d1 < b1; d1++)\n for (d2 = a2; d2 < b2; d2++)\n u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];\n return d1 + d2;\n}\n\nint\nmain ()\n{\n if (f1 (0, 1024) != 1024\n || f2 (0, 1024, 17) != 1024 + 2 * (17 + 5 * 1024)\n || f3 (0, 32, 0, 32) != 64)\n __builtin_abort ();\n return 0;\n} #pragma omp parallel for default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-1.c", "omp_pragma_line": "#pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w)", "context_chars": 100, "text": "1024], v[1024], w[1024];\n\n__attribute__((noinline, noclone)) long\nf1 (long a, long b)\n{\n long d;\n 
for (d = a; d < b; d++)\n u[d] = v[d] + w[d];\n return d;\n}\n\n__attribute__((noinline, noclone)) long\nf2 (long a, long b, long c)\n{\n long d, e;\n #pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)\n for (d = a; d < b; d++)\n {\n u[d] = v[d] + w[d];\n c += 5;\n e = c;\n }\n return d + c + e;\n} #pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-1.c", "omp_pragma_line": "#pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)", "context_chars": 100, "text": " return d;\n}\n\n__attribute__((noinline, noclone)) long\nf2 (long a, long b, long c)\n{\n long d, e;\n for (d = a; d < b; d++)\n {\n u[d] = v[d] + w[d];\n c += 5;\n e = c;\n } #pragma omp parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-1.c", "omp_pragma_line": "#pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)", "context_chars": 100, "text": "\n__attribute__((noinline, noclone)) long\nf3 (long a1, long b1, long a2, long b2)\n{\n long d1, d2;\n for (d1 = a1; d1 < b1; d1++)\n for (d2 = a2; d2 < b2; d2++)\n u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];\n return d1 + d2;\n}\n\n__attribute__((noinline, noclone)) long\nf4 (long a1, long b1, long a2, long b2)\n{\n long d1, d2;\n #pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) collapse(2)\n for (d1 = a1; d1 < b1; d1++)\n for (d2 = a2; d2 < b2; d2++)\n u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];\n return d1 + d2;\n} #pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/pr66199-1.c", "omp_pragma_line": "#pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) collapse(2)", "context_chars": 100, "text": "\n__attribute__((noinline, noclone)) long\nf4 (long a1, long b1, long a2, long b2)\n{\n long d1, d2;\n for (d1 = a1; d1 < b1; d1++)\n for (d2 = a2; d2 < b2; d2++)\n u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];\n return d1 + d2;\n}\n\nint\nmain ()\n{\n if (f1 (0, 1024) != 1024\n || f2 (0, 1024, 17) != 1024 + 2 * (17 + 5 * 1024)\n || f3 (0, 32, 0, 32) != 64\n || f4 (0, 32, 0, 32) != 64)\n __builtin_abort ();\n return 0;\n} #pragma omp parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/appendix-a/a.16.1.c", "omp_pragma_line": "#pragma omp parallel for shared(x, y, index, n)", "context_chars": 100, "text": "at\nwork2 (int i)\n{\n return 2.0 * i;\n}\n\nvoid\na16 (float *x, float *y, int *index, int n)\n{\n int i;\nfor (i = 0; i < n; i++)\n {\n#pragma omp atomic\n x[index[i]] += work1 (i);\n y[i] += work2 (i);\n } #pragma omp parallel for shared(x, y, index, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/appendix-a/a.21.1.c", "omp_pragma_line": "#pragma omp parallel for 
ordered schedule(dynamic)", "context_chars": 100, "text": "k)\n{\n#pragma omp ordered\n printf (\" %d\\n\", k);\n}\n\nvoid\na21 (int lb, int ub, int stride)\n{\n int i;\nfor (i = lb; i < ub; i += stride)\n work (i);\n}\n\nint\nmain ()\n{\n a21 (0, 100, 5);\n return 0;\n} #pragma omp parallel for ordered schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n #pragma omp target data map(to: v1[0:N], v2[:N]) map(from: p[0:N])\n #pragma omp target\n for (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n long long *p1 = (long long *) malloc (MAX * sizeof (long long));\n long long *p2 = (long long *) malloc (MAX * sizeof (long long));\n long long *v1 = (long long *) malloc (MAX * sizeof (long long));\n long long *v2 = (long long *) malloc (MAX * sizeof (long long));\n\n init (v1, v2, MAX);\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double *v4, int N)\n{\n int i;\n #pragma omp target map(to: v3[0:N], v4[:N]) map(from: p1[0:N])\n for (i = 0; i < N; i++)\n\tp1[i] = v3[i] * v4[i];\n}\n\nvoid foo (double *p0, double *v1, double *v2, int N)\n{\n init (v1, v2, N);\n\n #pragma omp target data map(to: v1[0:N], v2[:N]) map(from: p0[0:N])\n vec_mult (p0, v1, v2, N);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r *p)\n{\n int i;\n char v1[N], v2[N];\n\n init (v1, v2);\n\n #pragma omp target map(from: p[0:N])\n for (i = 0; i < N; i++)\n\tp[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n char p1[N], p2[N];\n char v1[N], v2[N];\n\n init (v1, v2);\n\n vec_mult_ref (p1);\n vec_mult (p2);\n\n check (p1, p2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/declare_target-4.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp)", "context_chars": 100, "text": "tmp;\n}\n\nfloat accum (int k)\n{\n int i;\n float tmp = 0.0;\n\n #pragma omp target map(tofrom:tmp)\n for (i = 0; i < N; i++)\n\ttmp += Pfun (i, k);\n\n return tmp;\n}\n\nvoid check (float a, float b)\n{\n float err = (b == 0.0) ? a : (a - b) / b;\n if (((err > 0) ? 
err : -err) > EPS)\n abort ();\n} #pragma omp parallel for reduction(+:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_update-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " #pragma omp target data map(to: v1[:N], v2[:N]) map(from: p[0:N])\n {\n #pragma omp target\n\tfor (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n\n init_again (v1, v2, N);\n\n #pragma omp target update to(v1[:N], v2[:N])\n\n #pragma omp target\n\t#pragma omp parallel for\n\t for (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n int *p1 = (int *) malloc (MAX * sizeof (int));\n int *p2 = (int *) malloc (MAX * sizeof (int));\n int *v1 = (int *) malloc (MAX * sizeof (int));\n int *v2 = (int *) malloc (MAX * sizeof (int));\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_update-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t_again (v1, v2, N);\n\n #pragma omp target update to(v1[:N], v2[:N])\n\n #pragma omp target\n\tfor (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n int *p1 = (int *) malloc (MAX * sizeof (int));\n int *p2 = (int *) malloc (MAX * sizeof (int));\n int *v1 = (int *) malloc (MAX * sizeof (int));\n int *v2 = (int *) malloc (MAX * sizeof (int));\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/declare_target-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "];\n}\n\nvoid vec_mult ()\n{\n int i;\n\n #pragma omp target update to(v1, v2)\n\n #pragma omp target\n for (i = 0; i < N; i++)\n\tp2[i] = v1[i] * v2[i];\n\n #pragma omp target update from(p2)\n}\n\nint main ()\n{\n init ();\n\n vec_mult_ref ();\n vec_mult ();\n\n check ();\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/async_target-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (C = 0; C < N; C += CHUNKSZ)\n {\n #pragma omp task\n\t#pragma omp target map(Z[C:CHUNKSZ])\n\t for (i = C; i < C + CHUNKSZ; i++)\n\t Z[i] = F (Z[i]);\n }\n #pragma omp taskwait\n}\n\nvoid init ()\n{\n int i;\n for (i = 0; i < N; i++)\n Y[i] = Z[i] = 0.1 * i;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ult (int *p)\n{\n int i;\n int v1[N], v2[N];\n\n init (v1, v2);\n\n #pragma omp target map(p[0:N])\n for (i = 0; i < N; i++)\n\tp[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n int p1[N], p2[N];\n int v1[N], v2[N];\n\n init (v1, v2);\n\n vec_mult_ref (p1);\n vec_mult (p2);\n\n check (p1, p2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/teams-2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " \\\n\t\t reduction(+:sum)\n #pragma omp distribute\n\tfor (i0 = 0; i0 < n; i0 += block_size)\n\t for (i = i0; i < ((i0 + block_size > n) ? n : i0 + block_size); i++)\n\t sum += B[i] * C[i];\n\n return sum;\n}\n\nvoid check (float a, float b)\n{\n float err = (b == 0.0) ? a : (a - b) / b;\n if (((err > 0) ? err : -err) > EPS)\n abort ();\n} #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-6.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rget if (N > THRESHOLD) map(to: v1[:N], v2[:N])\n\t{\n\t if (omp_is_initial_device ())\n\t abort;\n\n\t for (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n\t}\n\n init_again (v1, v2, N);\n\n #pragma omp target if (N > THRESHOLD) map(to: v1[:N], v2[:N])\n\t{\n\t if (omp_is_initial_device ())\n\t abort ();\n\n\t #pragma omp parallel for\n\t for (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-6.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t if (N > THRESHOLD) map(to: v1[:N], v2[:N])\n\t{\n\t if (omp_is_initial_device ())\n\t abort ();\n\n\t for (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n\t}\n }\n}\n\nint main ()\n{\n float *p1 = (float *) malloc (MAX * sizeof (float));\n float *p2 = (float *) malloc (MAX * sizeof (float));\n float *v1 = (float *) malloc (MAX * sizeof (float));\n float *v2 = (float *) malloc (MAX * sizeof (float));\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target-4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble *v1, double *v2)\n{\n int i;\n #pragma omp target map(to: v1[0:N], v2[:N]) map(from: p[0:N])\n for (i = 0; i < N; i++)\n\tp[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n double p1[N], p2[N];\n double v1[N], v2[N];\n\n init (v1, v2);\n\n vec_mult_ref (p1, v1, v2);\n vec_mult (p2, v1, v2);\n\n check (p1, p2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/async_target-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " omp target map(to: v1, v2) map(from: p[0:n])\n {\n\tif (omp_is_initial_device ())\n\t abort ();\n\n\tfor (i = 0; i < n; i++)\n\t p[i] = v1[i] * v2[i];\n\n\t free (v1);\n\t free (v2);\n }\n\n #pragma omp taskwait\n}\n\nvoid check (float *a, float *b, int n)\n{\n int i;\n for (i = 0 ; i < n ; i++)\n {\n float err = (a[i] == 0.0) ? b[i] : (b[i] - a[i]) / a[i];\n if (((err > 0) ? 
err : -err) > EPS)\n\tabort ();\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n int i;\n long long v1[N], v2[N];\n\n init (v1, v2);\n\n #pragma omp target map(v1, v2, p[0:N])\n for (i = 0; i < N; i++)\n\tp[i] = v1[i] * v2[i];\n}\n\nint main ()\n{\n long long p1[N], p2[N];\n long long v1[N], v2[N];\n\n init (v1, v2);\n\n vec_mult_ref (p1);\n vec_mult (p2);\n\n check (p1, p2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-3.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp)", "context_chars": 100, "text": ":COLS)\n for (k = 0; k < cols; k++)\n {\n\tint tmp = 0;\n\n\t#pragma omp target map(tofrom:tmp)\n\t for (i = 0; i < rows; i++)\n\t tmp += (Q[i][k] * Q[i][k]);\n\n\t#pragma omp target\n\t #pragma omp parallel for\n\t for (i = 0; i < rows; i++)\n\t Q[i][k] *= tmp;\n }\n}\n\nint main ()\n{\n int (*Q1)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));\n int (*Q2)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));\n\n init (Q1, ROWS, COLS);\n init (Q2, ROWS, COLS);\n\n gramSchmidt_ref (Q1, ROWS, COLS);\n gramSchmidt (Q2, ROWS, COLS);\n\n check (Q1, Q2, ROWS, COLS);\n\n free (Q1);\n free (Q2);\n\n return 0;\n} #pragma omp parallel for reduction(+:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n(+:tmp)\n\t for (i = 0; i < rows; i++)\n\t tmp += (Q[i][k] * Q[i][k]);\n\n\t#pragma omp target\n\t for (i = 0; i < rows; i++)\n\t Q[i][k] *= tmp;\n }\n}\n\nint main ()\n{\n int (*Q1)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));\n int (*Q2)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));\n\n init (Q1, ROWS, COLS);\n init (Q2, ROWS, COLS);\n\n gramSchmidt_ref (Q1, ROWS, COLS);\n gramSchmidt (Q2, ROWS, COLS);\n\n check (Q1, Q2, ROWS, COLS);\n\n free (Q1);\n free (Q2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_update-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " data map(to: v1[:N], v2[:N]) map(from: p[0:N])\n {\n int changed;\n\n #pragma omp target\n\tfor (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n\n changed = maybe_init_again (v1, N);\n #pragma omp target update if (changed) to(v1[:N])\n\n changed = maybe_init_again (v2, N);\n #pragma omp target update if (changed) to(v2[:N])\n\n #pragma omp target\n\t#pragma omp parallel for\n\t for (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n int *p = (int *) malloc (MAX * sizeof (int));\n int *p1 = (int *) malloc (MAX * sizeof (int));\n int *v1 = (int *) malloc (MAX * sizeof (int));\n int *v2 = (int *) malloc (MAX * sizeof (int));\n\n vec_mult_ref (p, v1, v2, MAX);\n vec_mult (p1, v1, v2, MAX);\n\n check (p, p1, MAX);\n\n free (p);\n free (p1);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_update-2.c", "omp_pragma_line": "#pragma omp 
parallel for", "context_chars": 100, "text": "t_again (v2, N);\n #pragma omp target update if (changed) to(v2[:N])\n\n #pragma omp target\n\tfor (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n int *p = (int *) malloc (MAX * sizeof (int));\n int *p1 = (int *) malloc (MAX * sizeof (int));\n int *v1 = (int *) malloc (MAX * sizeof (int));\n int *v2 = (int *) malloc (MAX * sizeof (int));\n\n vec_mult_ref (p, v1, v2, MAX);\n vec_mult (p1, v1, v2, MAX);\n\n check (p, p1, MAX);\n\n free (p);\n free (p1);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-7.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "get if (N > THRESHOLD) map(to: v1[:N], v2[:N])\n {\n\tif (omp_is_initial_device ())\n\t abort ();\n\tfor (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n }\n}\n\nint main ()\n{\n short *p1 = (short *) malloc (MAX * sizeof (short));\n short *p2 = (short *) malloc (MAX * sizeof (short));\n short *v1 = (short *) malloc (MAX * sizeof (short));\n short *v2 = (short *) malloc (MAX * sizeof (short));\n\n init (v1, v2, MAX);\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/declare_target-5.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp)", "context_chars": 100, "text": "n tmp;\n}\n\nfloat accum ()\n{\n int i, k;\n float tmp = 0.0;\n\n #pragma omp target map(tofrom:tmp)\n for (i = 0; i < N; i++)\n\t{\n\t float tmp1 = 0.0;\n\n\t #pragma omp simd reduction(+:tmp1)\n\t for (k = 0; k < M; k++)\n\t tmp1 += Pfun(i,k);\n\n\t tmp += tmp1;\n\t} #pragma omp parallel for reduction(+:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target-5.c", "omp_pragma_line": "#pragma omp parallel for if(N > THRESHOLD2)", "context_chars": 100, "text": ":N], v2[:N]) \\\n\t\t map(from: p[0:N])\n {\n if (omp_is_initial_device ())\n\tabort ();\n\n for (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n }\n}\n\nint main ()\n{\n float p1[N], p2[N];\n float v1[N], v2[N];\n\n init (v1, v2);\n\n vec_mult_ref (p1, v1, v2);\n vec_mult (p2, v1, v2);\n\n check (p1, p2);\n\n return 0;\n} #pragma omp parallel for if(N > THRESHOLD2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " #pragma omp target data map(from: p[0:N])\n {\n #pragma omp target map(to: v1[:N], v2[:N])\n\tfor (i = 0; i < N; i++)\n\t p[i] = v1[i] * v2[i];\n\n init_again (v1, v2, N);\n\n #pragma omp target map(to: v1[:N], v2[:N])\n\t#pragma omp parallel for\n\t for (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n char *p1 = (char *) malloc (MAX * sizeof (char));\n char *p2 = (char *) malloc (MAX * sizeof (char));\n char *v1 = (char *) malloc (MAX * sizeof (char));\n char *v2 = (char *) malloc (MAX * sizeof (char));\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c/examples-4/target_data-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = v1[i] * v2[i];\n\n init_again (v1, v2, N);\n\n #pragma omp target map(to: v1[:N], v2[:N])\n\tfor (i = 0; i < N; i++)\n\t p[i] = p[i] + (v1[i] * v2[i]);\n }\n}\n\nint main ()\n{\n char *p1 = (char *) malloc (MAX * sizeof (char));\n char *p2 = (char *) malloc (MAX * sizeof (char));\n char *v1 = (char *) malloc (MAX * sizeof (char));\n char *v2 = (char *) malloc (MAX * sizeof (char));\n\n vec_mult_ref (p1, v1, v2, MAX);\n vec_mult (p2, v1, v2, MAX);\n\n check (p1, p2, MAX);\n\n free (p1);\n free (p2);\n free (v1);\n free (v2);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/andyfratello/PAR/Laboratori/Lab4/codesLab4/multisort-optional2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " basicsort(n, data);\n }\n}\n\nstatic void initialize(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n if (i==0) {\n data[i] = rand();\n } else {\n data[i] = ((data[i-1]+1) * i * 104723L) % N;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/andyfratello/PAR/Laboratori/Lab4/codesLab4/multisort-optional2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* 104723L) % N;\n }\n }\n}\n\nstatic void clear(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n data[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/sine_x_estimation.c", "omp_pragma_line": "#pragma omp parallel for shared(sum)", "context_chars": 100, "text": " double itime, ftime, timetaken;\n itime = omp_get_wtime();\n\n rads = x * (3.1415 / 180);\n\n for (i = 1; i <= 15; i++)\n {\n intm_value = pow(rads, j) / fact(j);\n sign = 1;\n if (i % 2 == 0)\n {\n sign = -1;\n }\n sum += (sign * intm_value);\n j = j + 2;\n } #pragma omp parallel for shared(sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/armstrong_num.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(num,res,rem)", "context_chars": 100, "text": "rds = omp_get_max_threads();\n omp_set_num_threads(max_thrds);\n int i, j, num, flag,rem,res;\n\n\tfor (i = 2; i < 1000; i++)\n\t{\n\t\trem=0,res=0;\n\t\tnum=i;\n\t\twhile (num != 0) {\n\t\t\trem = num % 10;\n\t\t\tres += (rem * rem * rem);\n\t\t\tnum /= 10;\n\t\t}\n\t\tif (res == i){\n\t\t\tint tid = omp_get_thread_num();\n\t\t\tprintf(\"%d is Armstrong number from thread-%d\\n\",i,tid);\n\t\t}\t\t\t \n\t} #pragma omp parallel for schedule(dynamic) private(num,res,rem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/pi_estimation_integration.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) reduction(+: area) private(x, y, i, dx)", "context_chars": 100, "text": " itime = omp_get_wtime();\n int iters = 10000000;\n dx = 1.0 / (double)iters;\n x = dx;\n\n for (i = 1; i < iters; i++)\n {\n dx = 1.0 / (double)iters;\n x = (double)i * dx;\n y = sqrt((double)1.0 - (pow(x, 2)));\n area += dx * y;\n } #pragma omp parallel for schedule(dynamic) reduction(+: area) private(x, y, i, dx)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/cosine_x_estimation.c", "omp_pragma_line": "#pragma omp parallel for shared(sum)", "context_chars": 100, "text": " ftime, timetaken;\n itime = omp_get_wtime();\n\n rads = x * (3.1415 / 180);\n sum = 1.0;\n\n for (i = 1; i <= 15; i++)\n {\n intm_value = pow(rads, j) / fact(j);\n sign = 1;\n if (i % 2 != 0)\n {\n sign = -1;\n }\n sum += (sign * intm_value);\n j = j + 2;\n } #pragma omp parallel for shared(sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/histogram_plotting.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "me = omp_get_wtime();\n\n\t// Generating random float data\n\tprintf(\"Random Float data\\n\");\n\tsrand(0);\n\tfor (i = 0; i < n; i++)\n\t{\n\t\tfloat random = ((float)rand() / (float)(RAND_MAX));\n\t\tfloat_arr[i] = random;\n\t\tprintf(\"%f \", random);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/histogram_plotting.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndom = ((float)rand() / (float)(RAND_MAX));\n\t\tfloat_arr[i] = random;\n\t\tprintf(\"%f \", random);\n\t}\n\n\tfor (i = 0; i <= bins; i++)\n\t\thist[i] = 0;\n\n\n\t//Dividing float data into bins\n\t\n\t#pragma omp parallel for schedule(dynamic) private(i, j)\n\tfor (i = 0; i < n; i++)\n\t{\n\t\tfor (j = 1; j <= bins; j++)\n\t\t{\n\t\t\tfloat bin_max = (float)j / (float)bins;\n\t\t\tif (float_arr[i] <= bin_max)\n\t\t\t{\n\t\t\t\thist[j]++;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/histogram_plotting.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(i, j)", "context_chars": 100, "text": " omp parallel for\n\tfor (i = 0; i <= bins; i++)\n\t\thist[i] = 0;\n\n\n\t//Dividing float data into bins\n\t\n\tfor (i = 0; i < n; i++)\n\t{\n\t\tfor (j = 1; j <= bins; j++)\n\t\t{\n\t\t\tfloat bin_max = (float)j / (float)bins;\n\t\t\tif (float_arr[i] <= bin_max)\n\t\t\t{\n\t\t\t\thist[j]++;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for schedule(dynamic) private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/prime_num_sum.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) shared(primesum, arr) private(num, j, flag)", "context_chars": 100, "text": "tial Array Declaration:\\n\");\n for (i = 0; i < 6; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 6; i++){\n num = arr[i];\n flag = 1;\n for (j = 2; j < num; j++){\n if (num % j == 0){\n flag = 0;\n }\n }\n if (flag == 1){\n #pragma omp critical\n primesum = primesum + num;\n }\n } #pragma omp parallel for schedule(dynamic) shared(primesum, arr) private(num, j, flag)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Sorting/selection_sort.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(j)", "context_chars": 100, "text": "e();\n\t\n\t// Selection Sort\n\tint i, j, t, min_idx;\n\tfor (i = 0; i < n; i++)\n\t{\n min_idx = i;\n\t\tfor (j = i; j < n; j++)\n\t\t{\n\t\t\tif (arr[j] < arr[min_idx]){\n\t\t\t\tmin_idx=j;\n\t\t\t}\n\t\t} #pragma omp 
parallel for schedule(dynamic) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Sorting/bucket_sort.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "intf(\"%d \", arr[x]);\n\t}\n\titime = omp_get_wtime();\n\n\tint i, b, bi = -1, bj = -1, bk = -1, bl = -1;\n\n\tfor (i = 0; i < n; i++)\n\t{\n\t\tb = arr[i] / numbuc;\n\t\tif (b == 0)\n\t\t{\n\t\t\tbucket[b][++bi] = arr[i];\n\t\t}\n\t\telse if (b == 1)\n\t\t{\n\t\t\tbucket[b][++bj] = arr[i];\n\t\t}\n\t\telse if (b == 2)\n\t\t{\n\t\t\tbucket[b][++bk] = arr[i];\n\t\t}\n\t\tif (b == 3)\n\t\t{\n\t\t\tbucket[b][++bl] = arr[i];\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Matrices/matrix_sum.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " printf(\"%d \", mat2[i][j]);\n }\n printf(\"\\n\");\n }\n\n omp_set_num_threads(5);\n for (i = 0; i < 3; i++){\n for (j = 0; j < 3; j++){\n sum[i][j] = mat1[i][j] + mat2[i][j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Matrices/matrix_difference.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " printf(\"%d \", mat2[i][j]);\n }\n printf(\"\\n\");\n }\n\n omp_set_num_threads(5);\n for (i = 0; i < 3; i++){\n for (j = 0; j < 3; j++){\n diff[i][j] = mat1[i][j] - mat2[i][j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Matrices/matrix_col_sum.c", "omp_pragma_line": "#pragma omp parallel for shared(col_sum)", "context_chars": 100, "text": " printf(\"%d \", mat[i][j]);\n }\n printf(\"\\n\");\n }\n\n omp_set_num_threads(5);\n\tfor (i = 0; i < 3; i++){\n col_sum[i] = 0;\n for (j = 0; j < 3; j++){\n col_sum[i] += mat[j][i];\n }\n } #pragma omp parallel for shared(col_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Matrices/matrix_multiplication.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "intf(\"%d \", mat2[i][j]);\n }\n printf(\"\\n\");\n }\n \n omp_set_num_threads(5);\n for (i = 0; i < 3; i++){\n for (j = 0; j < 3; j++){\n sum[i][j] = 0;\n for (k = 0; k < 3; k++){\n sum[i][j] += mat1[i][k] * mat2[k][j];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Matrices/matrix_row_sum.c", "omp_pragma_line": "#pragma omp parallel for shared(row_sum)", "context_chars": 100, "text": " printf(\"%d \", mat[i][j]);\n }\n printf(\"\\n\");\n }\n\n omp_set_num_threads(15);\n\tfor (i = 0; i < 3; i++){\n row_sum[i] = 0;\n for (j = 0; j < 3; j++){\n row_sum[i] += mat[i][j];\n }\n } #pragma omp parallel for shared(row_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Constructs/ordered_construct.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(dynamic)", "context_chars": 100, "text": " = {1, 2, 3, 4, 5};\n int i;\n\n printf(\"\\nPrinting array elements using ordered construct\\n\");\n\tfor (i = 0; i < 5; i++)\n {\n\t\t#pragma omp ordered\n printf(\"%d \", arr[i]);\n } #pragma omp parallel for ordered schedule(dynamic)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Constructs/atomic_construct.c", "omp_pragma_line": "#pragma omp parallel for shared(x)", "context_chars": 100, "text": "nInitial Declaration:\\n\");\n for (i = 0; i < 5; i++){\n printf(\"%d \", stack[i]);\n }\n\n for(i = 0; i < 5; i++)\n {\n #pragma omp atomic write\n x[i] = x[0]*2*i;\n } #pragma omp parallel for shared(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_linear_search.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0; i < n; i++)\n {\n printf(\"%d \",arr[i]);\n }\n printf(\"\\nTarget : %d\",target);\n\n \n\tfor (i = 0; i < n; i++)\n {\n if (arr[i]==target){\n\t\t\tindex=i;\n\t\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_factorial.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " printf(\"\\nArray of number:\\n\");\n for(i=0;i<10;i++){\n printf(\"%d \",arr[i]);\n }\n\n for(i=0;i<10;i++)\n {\n num = arr[i];\n fact=1;\n for(j=1;j<=num;j++)\n {\n fact*=j;\n }\n printf(\"\\nFactorial of %d is %d from thread-%d\",num,fact,omp_get_thread_num());\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/operation_on_array.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(dynamic)", "context_chars": 100, "text": "tial Array Declaration:\\n\");\n for (i = 0; i < 5; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 5; i++){\n arr[i] = arr[i] * 5;\n arr[i] = arr[i] + 3;\n } #pragma omp parallel for ordered schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_evenodd_count.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) shared(e, o)", "context_chars": 100, "text": "ial Array Declaration:\\n\");\n for (i = 0; i < 10; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 10; i++){\n if (arr[i] % 2 == 0)\n e++;\n else\n o++;\n } #pragma omp parallel for schedule(dynamic) shared(e, o)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_squares_cubes_sum.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) shared(ssquares, scubes)", "context_chars": 100, "text": "tial Array Declaration:\\n\");\n for (i = 0; i < 9; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 9; i++){\n ssquares += pow(arr[i], 2);\n scubes += pow(arr[i], 3);\n } #pragma omp parallel for schedule(dynamic) shared(ssquares, scubes)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_sum.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) shared(sum)", "context_chars": 100, "text": "tial Array Declaration:\\n\");\n for (i = 0; i < 5; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 5; i++){\n sum += arr[i];\n } #pragma omp parallel for schedule(dynamic) shared(sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Defcon27/Parallel-Computing-in-C-using-OpenMP/Arrays/array_min_max.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, 
"text": "tial Array Declaration:\\n\");\n for (i = 0; i < 9; i++){\n printf(\"%d \", arr[i]);\n }\n\n for (i = 0; i < 9; i++){\n if (arr[i] > max_)\n max_ = arr[i];\n if ((arr[i]) < min_)\n min_ = arr[i];\n } #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/relu/relu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s);\n int nthreads = omp_get_num_threads();\n printf(\"Number of threads = %d\\n\", nthreads);\n for(int idx=0; idx0 ? x[idx] : 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/Mattson_OMP_exercise/mandel.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(c,eps)", "context_chars": 100, "text": "ntains the Mandelbrot set,\n// testing each point to see whether it is inside or outside the set.\n\nfor (i=0; i #pragma omp parallel for default(shared) private(c,eps)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/Mattson_OMP_exercise/pi_mc.c", "omp_pragma_line": "#pragma omp parallel for private(x,y,test) reduction(+:Ncirc)", "context_chars": 100, "text": "rcle. Side of squrare is 2*r \n\n seed(-r, r); // The circle and square are centered at the origin\nfor(i=0;i #pragma omp parallel for private(x,y,test) reduction(+:Ncirc)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/Mattson_OMP_exercise/solutions/matmul_par.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, i, j, k) ", "context_chars": 100, "text": "im; j++)\n\t\t\t*(C+(i*Ndim+j)) = 0.0;\n\t\n\t\tstart_time = omp_get_wtime();\n\n\t/* Do the matrix product */\n\nfor (i=0; i #pragma omp parallel for private(tmp, i, j, k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/Mattson_OMP_exercise/solutions/mandel_par.c", "omp_pragma_line": "#pragma omp parallel for default(shared) firstprivate(eps) private(c, j)", "context_chars": 100, "text": "\n// testing each point to see whether it is inside or outside the set.\n omp_set_num_threads(4);\nfor (i=0; i #pragma omp parallel for default(shared) firstprivate(eps) private(c, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/vec_add/vec_add.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "threads = omp_get_num_threads();\n printf(\"Number of threads = %d\\n\", nthreads);\n\n // init\n for(idx=0; idx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/vec_add/vec_add.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lel for\n for(idx=0; idxfor(idx=0; idx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/mat_mult/mat_mult.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " //int nthreads = omp_get_num_threads();\n //printf(\"Number of threads = %d\\n\", nthreads);\n for(register int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/mat_mult/mat_mult.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "%d\\n\", nthreads);\n #pragma omp parallel for\n for(register int i=0; ifor(register int j=0; j #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/mat_mult/mat_mult.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n //#pragma omp parallel for\n for(register int j=0; jregister DTYPE *a0p = &A(i, 0);\n register DTYPE *a1p = &A(i+1, 0);\n register DTYPE *a2p = &A(i+2, 0);\n register DTYPE *a3p = &A(i+3, 0);\n register DTYPE bp0 = B(0, j);\n\n for(register int p=0; p #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/map_reduce/map_reduce.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " return;\n}\n\nDTYPE *init(DTYPE *in, const int len, const DTYPE val)\n{\n assert(in && len>0);\n for(int idx=0; idx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/map_reduce/map_reduce.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " DTYPE *in, register DTYPE *out, register const int len)\n{\n assert(f && in && out && len>0);\n for(register int idx=0; idx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/map_reduce/map_reduce.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ter const DTYPE *in, register const int len)\n{\n assert(f && in && len>0);\n DTYPE res = 0;\n for(register int idx=0; idx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/pi/my_pi.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:cur_pi)", "context_chars": 100, "text": " - 1/15 ...\ndouble calc_pi_gregory(const long max_iter_times)\n{\n register double cur_pi = 0;\n for(register int idx=1; idx<=max_iter_times; idx+=2)\n {\n cur_pi += (idx>>1 & 1) ? -4./idx : 4./idx;\n } #pragma omp parallel for reduction(+:cur_pi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/pi/my_pi.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:cur_pi)", "context_chars": 100, "text": "n pi;\n}\n\ndouble calc_pi_nilakantha(const long max_iter_times)\n{\n register double cur_pi = 3;\n for(register int idx=2; idx<=max_iter_times; idx+=2)\n {\n cur_pi += (idx>>1 & 1) ? 4./(idx*(idx+1)*(idx+2)) : -4./(idx*(idx+1)*(idx+2));//TODO \n //printf(\"%d %d %d \\n\", idx, idx+1, idx+2);\n } #pragma omp parallel for reduction(+:cur_pi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ysh329/OpenMP-101/pooling/pooling.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rd_pooling(layer_t *l)\n{\n assert(l);\n int h_offset = -l->pad;\n int w_offset = -l->pad;\n for(int b=0; boutput_shape[0]; ++b)\n {\n for(int k=0; koutput_shape[1]; ++k)\n {\n for(int i=0; ioutput_shape[2]; ++i)\n {\n for(int j=0; joutput_shape[3]; ++j)\n {\n int out_idx = OUT_IDX(b, k, i, j);\n int max_idx = -1;\n DTYPE max = -FLT_MAX;\n for(int n=0; nksize; ++n)\n {\n for(int m=0; mksize; ++m)\n {\n int cur_h = i*l->stride + h_offset + n;\n int cur_w = j*l->stride + w_offset + m;\n int in_idx = IN_IDX(b, k, cur_h, cur_w);\n\n int valid = (cur_h>=0 && cur_houtput[2] &&\n cur_w>=0 && cur_woutput[3]);\n DTYPE val = (valid!=0) ? l->input[in_idx] : -FLT_MAX;\n\n max = (val > max) ? val : max;\n max_idx = (val > max) ? 
in_idx: max_idx;\n }\n }\n l->output[out_idx] = max;\n // l->indexes[out_idx] = max_idx; // record for backprop\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/non-mpi/gradient.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "MER(section1,timers)\n /* End section1 */\n\n /* Begin read section */\n START_TIMER(read)\n for(int i= u_vec->size[1]-1;i>=0;i--)\n {\n int tid = i%nthreads;\n\n off_t offset = counters[tid] * u_size;\n lseek(files[tid], -1 * offset, SEEK_END);\n\n int ret = read(files[tid], u[t0][i], u_size);\n\n if (ret != u_size) {\n printf(\"%d\", ret);\n perror(\"Cannot open output file\");\n exit(1);\n }\n\n counters[tid]++;\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/non-mpi/forward.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "R(section2,timers)\n /* End section2 */\n\n /* Begin write section */\n START_TIMER(write)\n for(int i=0; i < u_vec->size[1];i++)\n {\n int tid = i%nthreads;\n int ret = write(files[tid], u[t0][i], u_size);\n if (ret != u_size) {\n perror(\"Cannot open output file\");\n exit(1);\n }\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/mpi/gradient.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "MER(section1,timers)\n /* End section1 */\n\n /* Begin read section */\n START_TIMER(read)\n for(int i= u_vec->size[1]-1;i>=0;i--)\n {\n int tid = i%nthreads;\n\n off_t offset = counters[tid] * u_size;\n lseek(files[tid], -1 * offset, SEEK_END);\n\n int ret = read(files[tid], u[t0][i], u_size);\n\n if (ret != u_size) {\n printf(\"%d\", ret);\n perror(\"Cannot open output file\");\n exit(1);\n }\n\n counters[tid]++;\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/mpi/forward.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1)", "context_chars": 100, "text": "R(section2,timers)\n /* End section2 */\n\n /* Begin write section */\n START_TIMER(write)\n for(int i=0; i < u_vec->size[1];i++)\n {\n int tid = i%nthreads;\n int ret = write(files[tid], u[t0][i], u_size);\n if (ret != u_size) {\n perror(\"Write size mismatch with u_size\");\n exit(1);\n }\n } #pragma omp parallel for schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/gradient/gradient.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "timers)\n /* End section1 */\n\n struct timeval start, end;\n gettimeofday(&start, NULL);\n\n for(int i= u_vec->size[1]-1;i>=0;i--)\n {\n int tid = i%nthreads;\n\n off_t offset = counters[tid] * u_size;\n lseek(files[tid], -1 * offset, SEEK_END);\n\n int ret = read(files[tid], u[time][i], u_size);\n\n if (ret != u_size) {\n printf(\"%d\", ret);\n perror(\"Cannot open output file\");\n exit(1);\n }\n\n counters[tid]++;\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/gradient/forward.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "}\n\n /* End section2 */\n START_TIMER(section2)\n /* Begin section3 */\n int t2 = time;\n for(int i=0; i < u_vec->size[1];i++)\n {\n int tid = i%nthreads;\n int ret = write(files[tid], u[t2][i], u_size);\n\n if (ret != u_size) {\n perror(\"Cannot open output file\");\n exit(1);\n }\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/gradient/compression/gradient.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "timers)\n /* End section1 */\n\n struct timeval start, end;\n gettimeofday(&start, NULL);\n\n for(int i= u_vec->size[1]-1;i>=0;i--)\n {\n int tid = i%nthreads;\n\n zfp_type type = zfp_type_float;\n zfp_field* field = zfp_field_1d(u[t2][i], type, u_vec->size[2]);\n\n zfp_stream* zfp = zfp_stream_open(NULL);\n\n zfp_stream_set_rate(zfp, 8, type, zfp_field_dimensionality(field), zfp_false);\n //zfp_stream_set_reversible(zfp);\n //zfp_stream_set_precision(zfp, 1e-3);\n\n off_t bufsize = zfp_stream_maximum_size(zfp, field);\n void* buffer = malloc(bufsize);\n\n bitstream* stream = stream_open(buffer, bufsize);\n\n zfp_stream_set_bit_stream(zfp, stream);\n zfp_stream_rewind(zfp);\n\n int slice = spt[tid];\n\n offset[tid] += slices_size[tid][slice];\n\n lseek(files[tid], -1 * offset[tid], SEEK_END);\n\n int ret = read(files[tid], buffer, slices_size[tid][slice]);\n\n if (ret != slices_size[tid][slice]) {\n printf(\"%zu\\n\", offset[tid]);\n perror(\"Cannot open output file\");\n exit(1);\n }\n\n if (!zfp_decompress(zfp, field)) {\n printf(\"decompression failed\\n\");\n exit(1);\n }\n\n zfp_field_free(field);\n zfp_stream_close(zfp);\n stream_close(stream);\n free(buffer);\n spt[tid]--;\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/compression/non-mpi/gradient.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": ",timers)\n /* End section1 */\n\n /* Begin decompress section */\n START_TIMER(decompress)\n for(int i= u_vec->size[1]-1;i>=0;i--)\n {\n int tid = i%nthreads;\n\n zfp_type type = zfp_type_float;\n zfp_field* field = zfp_field_2d(u[t2][i], type, u_vec->size[2],u_vec->size[3]);\n\n zfp_stream* zfp = zfp_stream_open(NULL);\n\n zfp_stream_set_rate(zfp, RATE, type, zfp_field_dimensionality(field), zfp_false);\n //zfp_stream_set_reversible(zfp);\n //zfp_stream_set_precision(zfp, 1e-3);\n\n off_t bufsize = zfp_stream_maximum_size(zfp, field);\n void* buffer = malloc(bufsize);\n\n bitstream* stream = stream_open(buffer, bufsize);\n\n zfp_stream_set_bit_stream(zfp, stream);\n zfp_stream_rewind(zfp);\n\n int slice = spt[tid];\n\n offset[tid] += slices_size[tid][slice];\n\n lseek(files[tid], -1 * offset[tid], SEEK_END);\n\n int ret = read(files[tid], buffer, slices_size[tid][slice]);\n read_size += slices_size[tid][slice];\n\n if (ret != slices_size[tid][slice]) {\n printf(\"%zu\\n\", offset[tid]);\n perror(\"Cannot open output file\");\n exit(1);\n }\n\n if (!zfp_decompress(zfp, field)) {\n printf(\"decompression failed\\n\");\n exit(1);\n }\n\n zfp_field_free(field);\n 
zfp_stream_close(zfp);\n stream_close(stream);\n free(buffer);\n spt[tid]--;\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/speglich/offloading-to-nvme/src/compression/non-mpi/forward.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) num_threads(nthreads)", "context_chars": 100, "text": "\n /* Begin compress Section */\n START_TIMER(compress)\n zfp_type type = zfp_type_float;\n for(int i=0; i < u_vec->size[1];i++)\n {\n int tid = i%nthreads;\n\n zfp_field* field = zfp_field_2d(u[t0][i], type, u_vec->size[2], u_vec->size[3]);\n zfp_stream* zfp = zfp_stream_open(NULL);\n\n zfp_stream_set_rate(zfp, RATE, type, zfp_field_dimensionality(field), zfp_false);\n //zfp_stream_set_reversible(zfp);\n //zfp_stream_set_accuracy(zfp, 1e-8);\n\n size_t bufsize = zfp_stream_maximum_size(zfp, field);\n\n void* buffer = malloc(bufsize);\n bitstream* stream = stream_open(buffer, bufsize);\n\n zfp_stream_set_bit_stream(zfp, stream);\n zfp_stream_rewind(zfp);\n\n size_t zfpsize = zfp_compress(zfp, field);\n\n if (!zfpsize) {\n fprintf(stderr, \"compression failed\\n\");\n exit(1);\n }\n\n write(files[tid], buffer, zfpsize);\n write(metas[tid], &zfpsize, sizeof(size_t));\n write_size += zfpsize;\n\n zfp_field_free(field);\n zfp_stream_close(zfp);\n stream_close(stream);\n free(buffer);\n } #pragma omp parallel for schedule(static,1) num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGMultiOpenMPTarget.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:red)", "context_chars": 100, "text": "et + length; i++) {\n vectorDotResults[d] += a[i] * b[i];\n }\n\n#else\n{\n floatType red = 0;\nfor (int i = offset; i < offset + length; i++) {\n red += a[i] * b[i];\n } #pragma omp parallel for reduction(+:red)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "bi) {}\n};\n\nvoid CGOpenMP::MatrixCRSOpenMP::allocatePtr(int rows) {\n MatrixCRS::allocatePtr(rows);\n\nfor (int i = 0; i < rows + 1; i++) {\n ptr[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "::MatrixCRSOpenMP::allocateIndexAndValue(int values) {\n MatrixCRS::allocateIndexAndValue(values);\n\nfor (int i = 0; i < N; i++) {\n for (int j = ptr[i]; j < ptr[i + 1]; j++) {\n index[j] = 0;\n value[j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n}\n\nvoid CGOpenMP::MatrixELLOpenMP::allocateLength(int rows) {\n MatrixELL::allocateLength(rows);\n\nfor (int i = 0; i < rows; i++) {\n length[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n}\n\nvoid CGOpenMP::MatrixELLOpenMP::allocateIndexAndData() {\n MatrixELL::allocateIndexAndData();\n\nfor (int i = 0; i < N; i++) {\n for (int j = 0; j < length[i]; j++) {\n int k = j * N + i;\n index[k] = 0;\n data[k] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ata[k] = 0.0;\n }\n }\n}\n\nvoid CGOpenMP::JacobiOpenMP::allocateC(int N) {\n Jacobi::allocateC(N);\n\nfor (int i = 0; i < N; i++) {\n C[i] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ew floatType[N]);\n if (preconditioner != PreconditionerNone) {\n z.reset(new floatType[N]);\n }\n\nfor (int i = 0; i < N; i++) {\n p[i] = 0.0;\n q[i] = 0.0;\n r[i] = 0.0;\n if (preconditioner != PreconditionerNone) {\n z[i] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "econditionerNone) {\n z[i] = 0.0;\n }\n }\n}\n\nvoid CGOpenMP::allocateK() {\n CG::allocateK();\n\nfor (int i = 0; i < N; i++) {\n k[i] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (int i = 0; i < N; i++) {\n k[i] = 0.0;\n }\n}\n\nvoid CGOpenMP::allocateX() {\n CG::allocateX();\n\nfor (int i = 0; i < N; i++) {\n x[i] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ector _dst, Vector _src) {\n floatType *dst = getVector(_dst);\n floatType *src = getVector(_src);\n\nfor (int i = 0; i < N; i++) {\n dst[i] = src[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " N; i++) {\n dst[i] = src[i];\n }\n}\n\nvoid CGOpenMP::matvecKernelCRS(floatType *x, floatType *y) {\nfor (int i = 0; i < N; i++) {\n floatType tmp = 0;\n for (int j = matrixCRS->ptr[i]; j < matrixCRS->ptr[i + 1]; j++) {\n tmp += matrixCRS->value[j] * x[matrixCRS->index[j]];\n }\n y[i] = tmp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex[j]];\n }\n y[i] = tmp;\n }\n}\n\nvoid CGOpenMP::matvecKernelELL(floatType *x, floatType *y) {\nfor (int i = 0; i < N; i++) {\n floatType tmp = 0;\n for (int j = 0; j < matrixELL->length[i]; j++) {\n int k = j * N + i;\n tmp += matrixELL->data[k] * x[matrixELL->index[k]];\n }\n y[i] = tmp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oatType a, Vector _x, Vector _y) {\n floatType *x = getVector(_x);\n floatType *y = getVector(_y);\n\nfor (int i = 0; i < N; i++) {\n y[i] += a * x[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ctor _x, floatType a, Vector _y) {\n floatType *x = getVector(_x);\n floatType *y = 
getVector(_y);\n\nfor (int i = 0; i < N; i++) {\n y[i] = x[i] + a * y[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:res)", "context_chars": 100, "text": " Vector _b) {\n floatType res = 0;\n floatType *a = getVector(_a);\n floatType *b = getVector(_b);\n\nfor (int i = 0; i < N; i++) {\n res += a[i] * b[i];\n } #pragma omp parallel for reduction(+:res)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hahnjo/CGxx/openmp/CGOpenMP.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n }\n\n return res;\n}\n\nvoid CGOpenMP::applyPreconditionerKernelJacobi(floatType *x, floatType *y) {\nfor (int i = 0; i < N; i++) {\n y[i] = jacobi->C[i] * x[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jimouris/parallel-convolution/mpi_omp/mpi_omp_conv.c", "omp_pragma_line": "#pragma omp parallel for shared(src, dst) schedule(static) collapse(3)", "context_chars": 100, "text": "col_to, int width, int height, float** h, color_t imageType) {\n\tint i, j;\n\tif (imageType == GREY) {\nfor (i = row_from ; i <= row_to ; i++)\n\t\t\tfor (j = col_from ; j <= col_to ; j++)\n\t\t\t\tconvolute_grey(src, dst, i, j, width+2, height, h);\n\t} else if (imageType == RGB) {\n#pragma omp parallel for shared(src, dst) schedule(static) collapse(3)\n\t\tfor (i = row_from ; i <= row_to ; i++)\n\t\t\tfor (j = col_from ; j <= col_to ; j++)\n\t\t\t\tconvolute_rgb(src, dst, i, j*3, width*3+6, height, h);\n\t} #pragma omp parallel for shared(src, dst) schedule(static) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jimouris/parallel-convolution/mpi_omp/mpi_omp_conv.c", "omp_pragma_line": "#pragma omp parallel for shared(src, dst) schedule(static) collapse(3)", "context_chars": 100, "text": "l_to ; j++)\n\t\t\t\tconvolute_grey(src, dst, i, j, width+2, height, h);\n\t} else if (imageType == RGB) {\nfor (i = row_from ; i <= row_to ; i++)\n\t\t\tfor (j = col_from ; j <= col_to ; j++)\n\t\t\t\tconvolute_rgb(src, dst, i, j*3, width*3+6, height, h);\n\t} \n}\n\nvoid convolute_grey(uint8_t *src, uint8_t *dst, int x, int y, int width, int height, float** h) {\n\tint i, j, k, l;\n\tfloat val = 0;\n\tfor (i = x-1, k = 0 ; i <= x+1 ; i++, k++)\n\t\tfor (j = y-1, l = 0 ; j <= y+1 ; j++, l++)\n\t\t\tval += src[width * i + j] * h[k][l];\n\tdst[width * x + y] = val;\n} #pragma omp parallel for shared(src, dst) schedule(static) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/MetalheadKen/RayTracingInOneWeekend/main_opt.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:point_x, point_y, point_z) default(none) firstprivate(i, j, nx, ny, ns, cam, world) schedule(dynamic, 1)", "context_chars": 100, "text": "int_y = 0.0, point_z = 0.0;\n\n /* Sampling ns times per pixel area */\n for (int s = 0; s < ns; s++) {\n /* Centered at the center point of the pixel, the pixel outward distance is (0.0, 1.0] */\n float u = (float) (i + drand48()) / (float) nx;\n float v = (float) (j + drand48()) / (float) ny;\n\n /* Obtain the color value of the random sampling point in this pixel area */\n Ray r = cam.get_ray(cam, u, v);\n // Vector p = r.point_at_parameter(r, 2.0);\n\n /* Accumulate the color values of all ns random sample points of this point area */\n Vector temp = color(r, &world, 0);\n point_x += 
temp.point.x;\n point_y += temp.point.y;\n point_z += temp.point.z;\n } #pragma omp parallel for reduction(+:point_x, point_y, point_z) default(none) firstprivate(i, j, nx, ny, ns, cam, world) schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PetropoulakisPanagiotis/parallel-convolution/convolution/parallel_convolution.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NUM_THREADS) collapse(2) schedule(static, (my_width - 2) * (my_height - 2) / NUM_THREADS)", "context_chars": 100, "text": "r pixels first */\n //////////////////////////////////\n\n #ifdef ENABLE_OPEN_MP\n for(i = 2; i < my_height; i++){ // For every inner row\n for(j = 2 * mult; j < my_width; j++){ // and every inner column\n\n /* Compute the new value of the current pixel */\n (*im_after)[i][j] = (int)((*im_before)[i][j] * my_args.filter[1][1] +\n (*im_before)[i - 1][j] * my_args.filter[0][1] +\n (*im_before)[i - 1][j + mult] * my_args.filter[0][2] +\n (*im_before)[i][j + mult] * my_args.filter[1][2] +\n (*im_before)[i + 1][j + mult] * my_args.filter[2][2] +\n (*im_before)[i + 1][j] * my_args.filter[2][1] +\n (*im_before)[i + 1][j - mult] * my_args.filter[2][0] +\n (*im_before)[i][j - mult] * my_args.filter[1][0] +\n (*im_before)[i - 1][j - mult] * my_args.filter[0][0]);\n\n /* Truncated unexpected values */\n if((*im_after)[i][j] < 0)\n (*im_after)[i][j] = 0;\n else if((*im_after)[i][j] > 255)\n (*im_after)[i][j] = 255;\n } // End for\n } #pragma omp parallel for num_threads(NUM_THREADS) collapse(2) schedule(static, (my_width - 2) * (my_height - 2) / NUM_THREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/reproducers/TBB/tbb_mixed_omp.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nit(4);\n\n // RUN SEQUENTAIL FOR\n std::cerr << \"Running OMP for...\";\n gettimeofday(&sst, NULL);\n for(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/reproducers/MPI/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "MM_WORLD, &rank);\n MPI_Comm_rank (MPI_COMM_WORLD, &local_rank);\n\n MPI_Barrier (MPI_COMM_WORLD);\n\n\nfor (i = 0; i < 10; i++)\n {\n printf (\"P %d i=%d, rank=(%d/%d)\\n\", rank, i, omp_get_thread_num (),\n\t omp_get_num_threads ());\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr66429.c", "omp_pragma_line": "#pragma omp parallel for simd schedule(static, 32) collapse(3)", "context_chars": 100, "text": "d\nnoreturn (void)\n{\n for (;;);\n}\n\n__attribute__ ((noinline, noclone)) void\nfoo (int n)\n{\n int i;\n\nfor (i = 0; i < 10; i++)\n for (int j = n; j < 8; j++)\n for (long k = -10; k < 10; k++)\n\t{\n\t b[i][j][k] += 16;\n\t noreturn ();\n\t b[i][j][k] -= 32;\n\t} #pragma omp parallel for simd schedule(static, 32) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr66429.c", "omp_pragma_line": "#pragma omp parallel for simd schedule(static, 32)", "context_chars": 100, "text": "turn ();\n\t b[i][j][k] -= 32;\n\t}\n}\n\n__attribute__ ((noinline, noclone)) void\nbar (void)\n{\n int i;\n\nfor (i = 0; i < 10; i++)\n {\n b[0][0][i] += 16;\n noreturn ();\n b[0][0][i] -= 32;\n } #pragma omp parallel for simd schedule(static, 32)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": " -Wunknown-pragmas -Werror\" } */\n\nextern void bark (void);\nint i,j,k;\nint array[555];\n\nint\nmain()\n{\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n/* OUT variant does not apply to ORDERED construct. */\n#pragma omp ordered depend(out:i) /* { dg-error \"invalid depend kind\" } */\n\n/* depend(sink...) is allowed without an offset. */\n#pragma omp ordered depend(sink:i,j-1)\n\n#pragma omp ordered depend(sink:i-1,j+2)\n bark ();\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": ");\n\n#pragma omp ordered depend(source) /* { dg-error \"'depend' clause must be closely nested\" } */\n\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n/* Multiple depend(source) allowed. */\n#pragma omp ordered depend(source)\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "source) allowed. */\n#pragma omp ordered depend(source)\n#pragma omp ordered depend(source)\n }\n\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n#pragma omp ordered depend(sink:i-2,j-2,k+2) /* { dg-error \"does not match number of iteration var\" } */\n\tbark();\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "end(sink:i-2,j-2,k+2) /* { dg-error \"does not match number of iteration var\" } */\n\tbark();\n }\n\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n#pragma omp ordered depend(sink:i-2) /* { dg-error \"does not match number of iteration variables\" } */\n\tbark();\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "epend(sink:i-2) /* { dg-error \"does not match number of iteration variables\" } */\n\tbark();\n }\n\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n#pragma omp ordered depend(sink:k,i) /* { dg-error \"is not an iteration\" } */\n\tbark();\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": ",k+1)\n\t bar (i, j, k);\n#pragma omp ordered depend(source)\n\t }\n }\n}\n\nint\nbaz ()\n{\n int i, j;\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n#pragma omp ordered depend(sink:i-1,j-3)\n\tbar (i, j, 0);\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": 
"#pragma omp parallel for", "context_chars": 100, "text": "(a)\n #pragma omp for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* { dg-warning \"ambiguous\" } */\n #pragma omp taskloop\n for (i = 0; i < 10; i++)\n if (b)\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n\t else\n\t baz ();\n else\n bar ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } */\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\tif (b)\n\t bar ();\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": ");\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } */\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\tif (b)\n\t bar ();\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "omp parallel\n\tif (b)\n\t bar ();\n\telse\n\t baz ();\n }\n\n if (a)\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\t{\n\t if (b)\n\t bar ();\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-1.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "; j++)\n\t{\n\t if (b)\n\t bar ();\n\t}\n else\n baz ();\n\n if (a)\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\t{\n\t if (b)\n\t bar ();\n\t} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr59152.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 32) collapse(3)", "context_chars": 100, "text": "s \"-fopenmp -fipa-pure-const\" } */\n\nextern int b[];\nvoid\nfoo (void)\n{\n unsigned long v1, v2, v3;\n for (v1 = 0; v1 < 20; v1 += 2)\n for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)\n\tfor (v3 = 10; v3 > 0; v3--)\n\t #pragma omp atomic\n\t b[v3]++;\n}\n\nvoid\nbar (void)\n{\n unsigned long v1, v2, v3;\n #pragma omp parallel for schedule(static) collapse(3)\n for (v1 = 0; v1 < 20; v1 += 2)\n for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)\n\tfor (v3 = 10; v3 > 0; v3--)\n\t #pragma omp atomic\n\t b[v3]++;\n} #pragma omp parallel for schedule(static, 32) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr59152.c", "omp_pragma_line": "#pragma omp parallel for 
schedule(static) collapse(3)", "context_chars": 100, "text": "3 > 0; v3--)\n\t #pragma omp atomic\n\t b[v3]++;\n}\n\nvoid\nbar (void)\n{\n unsigned long v1, v2, v3;\n for (v1 = 0; v1 < 20; v1 += 2)\n for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)\n\tfor (v3 = 10; v3 > 0; v3--)\n\t #pragma omp atomic\n\t b[v3]++;\n}\n\nvoid\nbaz (void)\n{\n unsigned long v1, v2, v3;\n #pragma omp parallel for schedule(runtime) collapse(3)\n for (v1 = 0; v1 < 20; v1 += 2)\n for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)\n\tfor (v3 = 10; v3 > 0; v3--)\n\t #pragma omp atomic\n\t b[v3]++;\n} #pragma omp parallel for schedule(static) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr67521.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "67521 */\n/* { dg-do compile } */\n/* { dg-options \"-fopenmp\" } */\n\nvoid\nfoo (int x)\n{\n int i = 0;\n for (i = (i & x); i < 10; i = i + 2) /* { dg-error \"initializer expression refers to iteration variable\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr67521.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": " = i + 2) /* { dg-error \"initializer expression refers to iteration variable\" } */\n ;\n i = 0;\n for (i = 0; i < (i & x) + 10; i = i + 2) /* { dg-error \"condition expression refers to iteration variable\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr67521.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": " i = i + 2) /* { dg-error \"condition expression refers to iteration variable\" } */\n ;\n i = 0;\n for (i = 0; i < 10; i = i + ((i & x) + 2)) /* { dg-error \"increment expression refers to iteration variable\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr56883.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "83 */\n/* { dg-do compile }\n/* { dg-options \"-O2 -fopenmp\" } */\n\nvoid\nf1 (int ***x)\n{\n int i, j, k;\nfor (i = 0; i < 10; ++i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t #pragma omp parallel for\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr56883.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n}\n\nvoid\nf2 (int ***x)\n{\n int i, j, k;\n#pragma omp parallel for schedule(static,1)\n for (i = 0; i < 10; ++i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for schedule(static,1)\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t #pragma omp parallel for schedule(static,1)\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr56883.c", "omp_pragma_line": "#pragma omp parallel for 
schedule(static,1)", "context_chars": 100, "text": "\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n}\n\nvoid\nf2 (int ***x)\n{\n int i, j, k;\nfor (i = 0; i < 10; ++i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for schedule(static,1)\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t #pragma omp parallel for schedule(static,1)\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n } #pragma omp parallel for schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr56883.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1)", "context_chars": 100, "text": "a omp parallel shared(j)\n #pragma omp for schedule(static,1)\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n}\n\nvoid\nf3 (int ***x)\n{\n int i, j, k;\n#pragma omp parallel for schedule(runtime)\n for (i = 0; i < 10; ++i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for schedule(runtime)\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t #pragma omp parallel for schedule(runtime)\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n} #pragma omp parallel for schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr56883.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime)", "context_chars": 100, "text": "\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n }\n}\n\nvoid\nf3 (int ***x)\n{\n int i, j, k;\nfor (i = 0; i < 10; ++i)\n {\n #pragma omp parallel shared(j)\n #pragma omp for schedule(runtime)\n\tfor (j = 0; j < 10; ++j)\n\t {\n\t #pragma omp parallel for schedule(runtime)\n\t for (k = 0; k < 10; ++k)\n\t\tx[i][j][k] = k;\n\t }\n } #pragma omp parallel for schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(a)\n #pragma omp for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; 
j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* { dg-warning \"ambiguous\" } */\n #pragma omp taskloop\n for (i = 0; i < 10; i++)\n if (b)\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n\t else\n\t baz ();\n else\n bar ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } */\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\tif (b)\n\t bar ();\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": ");\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } */\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\tif (b)\n\t bar ();\n else\n baz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "omp parallel\n\tif (b)\n\t bar ();\n\telse\n\t baz ();\n }\n\n if (a)\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\t{\n\t if (b)\n\t bar ();\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-3.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "; j++)\n\t{\n\t if (b)\n\t bar ();\n\t}\n else\n baz ();\n\n if (a)\n for (i = 0; i < 10; i++)\n for (j = 0; j < 10; j++)\n\t{\n\t if (b)\n\t bar ();\n\t} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/pr58257.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)\t ", "context_chars": 100, "text": "pile } */\n/* { dg-options \"-O2 -fopenmp -Wall\" } */\n\nint\nfoo (int n)\n{\n int a[10][10];\n int x, y;\nfor (x = 0; x < n; x++)\t\t/* { dg-bogus \"may be used uninitialized in this function\" } #pragma omp parallel for collapse(2)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for simd ordered(1)\t ", "context_chars": 100, "text": "r \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n }\n for (i = 0; i < 64; i++)\n {\n #pragma omp ordered depend(sink: i - 1)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n #pragma omp ordered depend(source)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. 
clause with a parameter\" } */\n } #pragma omp parallel for simd ordered(1)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": "r \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n }\n for (i = 0; i < 64; i++)\n {\n #pragma omp ordered depend(sink: i - 1)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n #pragma omp ordered depend(source)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n } #pragma omp parallel for ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/ordered-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n }\n for (i = 0; i < 64; i++)\n {\n #pragma omp ordered depend(sink: i - 1)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n #pragma omp ordered depend(source)\t/* { dg-error \"clause must be closely nested inside a loop with .ordered. clause with a parameter\" } */\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-4.c", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "ly. */\n\ntypedef struct {\n char stuff[400];\n} foo;\n\nvoid\nfunk (foo *begin, foo *end)\n{\n foo *p;\nfor (p=end; p > begin; p--)\n {\n#pragma omp ordered depend(sink:p+1)\n void bar ();\n bar();\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/gridify-3.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "E];\n\t float Bs[BLOCK_SIZE][BLOCK_SIZE];\n\t float Cs[BLOCK_SIZE][BLOCK_SIZE];\n\t int C_row, C_col;\n\nfor (int row=0 ; row < BLOCK_SIZE ; row++)\n\t for (int col=0 ; col < BLOCK_SIZE ; col++)\n\t {\n Cs[row][col] = 0.0;\n\t } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/gridify-3.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "row][col] = 0.0;\n\t }\n\n\n for (int kblock = 0; kblock < K ; kblock += BLOCK_SIZE )\n\t {\nfor (int row=0 ; row < BLOCK_SIZE ; row++)\n for (int col=0 ; col < BLOCK_SIZE ; col++)\n\t\t {\n\t\t C_row = C_row_start + row;\n\t\t C_col = C_col_start + col;\n\t\t if ((C_row < M) && (kblock + col < K))\n\t\t As[row][col] = A[(C_row*LDA)+ kblock + col];\n\t\t else\n\t\t As[row][col] = 0;\n\t\t if ((kblock + row < K) && C_col < N)\n\t\t Bs[row][col] = B[((kblock+row)*LDB)+ C_col];\n\t\t else\n\t\t Bs[row][col] = 0;\n\t\t } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/gridify-3.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "l < N)\n\t\t Bs[row][col] = B[((kblock+row)*LDB)+ C_col];\n\t\t else\n\t\t 
Bs[row][col] = 0;\n\t\t }\n\nfor (int row=0 ; row < BLOCK_SIZE ; row++)\n for (int col=0 ; col < BLOCK_SIZE ; col++)\n\t\t {\n\t\t for (int e = 0; e < BLOCK_SIZE; ++e)\n Cs[row][col] += As[row][e] * Bs[e][col];\n\t\t } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/gridify-3.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": " Cs[row][col] += As[row][e] * Bs[e][col];\n\t\t }\n } /* End for kblock .. */\n\n\nfor (int row=0 ; row < BLOCK_SIZE ; row++)\n\t for (int col=0 ; col < BLOCK_SIZE ; col++)\n\t {\n C_row = C_row_start + row;\n C_col = C_col_start + col;\n\t if ((C_row < M) && (C_col < N))\n\t\t C[(C_row*LDC)+C_col] = alpha*Cs[row][col] + beta*C[(C_row*LDC)+C_col];\n\t } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-2.c", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "/* { dg-do compile } */\n\nvoid bar (int *);\n\nvoid\nfoo ()\n{\n int i,j;\nfor (i=0; i < 100; ++i)\n {\n#pragma omp ordered depend(sink:i-1)\n bar(&i);\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/schedule-simd-1.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (simd:static)", "context_chars": 100, "text": "t { x86_64-*-* i?86-*-* } } } */\n\n#define N 1024\nint a[N], b[N], c[N];\n\nvoid\nf1 (void)\n{\n int i;\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf2 (void)\n{\n int i;\n #pragma omp parallel for simd schedule (simd: static, 7)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n} #pragma omp parallel for simd schedule (simd:static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/schedule-simd-1.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (simd: static, 7)", "context_chars": 100, "text": "dule (simd:static)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf2 (void)\n{\n int i;\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf3 (void)\n{\n int i;\n #pragma omp parallel for simd schedule (simd : dynamic, 7)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n} #pragma omp parallel for simd schedule (simd: static, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/schedule-simd-1.c", "omp_pragma_line": "#pragma omp parallel for simd schedule (simd : dynamic, 7)", "context_chars": 100, "text": " (simd: static, 7)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf3 (void)\n{\n int i;\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf4 (void)\n{\n int i;\n #pragma omp parallel for simd schedule ( simd:runtime)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n} #pragma omp parallel for simd schedule (simd : dynamic, 7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/schedule-simd-1.c", "omp_pragma_line": "#pragma omp parallel for simd schedule ( simd:runtime)", "context_chars": 100, "text": "simd : dynamic, 7)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf4 (void)\n{\n int i;\n for (i = 0; i < 
N; i++)\n a[i] = b[i] + c[i];\n}\n\nvoid\nf5 (void)\n{\n int i;\n #pragma omp parallel for simd schedule (simd:auto)\n for (i = 0; i < N; i++)\n a[i] = b[i] + c[i];\n} #pragma omp parallel for simd schedule ( simd:runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/Wparentheses-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "distribute parallel for\n for (i = 0; i < 10; i++)\n if (b) /* { dg-warning \"ambiguous\" } */\n\tfor (j = 0; j < 10; j++)\n\t if (c)\n\t bar ();\n else\n\tbaz ();\n\n if (a) /* { dg-warning \"ambiguous\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/clauses-1.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "pse(1) nowait \\\n safelen(8) simdlen(4) aligned(q: 32)\n for (int i = 0; i < 64; i++)\n ll++;\n private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \\\n lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)\n for (int i = 0; i < 64; i++)\n ll++;\n #pragma omp parallel for simd \\\n private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \\\n lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \\\n safelen(8) simdlen(4) aligned(q: 32)\n for (int i = 0; i < 64; i++)\n ll++;\n #pragma omp parallel sections \\\n private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \\\n lastprivate (l)\n {\n #pragma omp section\n {}\n #pragma omp section\n {}\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/clauses-1.c", "omp_pragma_line": "#pragma omp parallel for simd \\", "context_chars": 100, "text": "l) linear (ll:1) ordered schedule(static, 4) collapse(1)\n for (int i = 0; i < 64; i++)\n ll++;\n private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \\\n lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \\\n safelen(8) simdlen(4) aligned(q: 32)\n for (int i = 0; i < 64; i++)\n ll++;\n #pragma omp parallel sections \\\n private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) 
proc_bind(spread) \\\n lastprivate (l)\n {\n #pragma omp section\n {}\n #pragma omp section\n {}\n } #pragma omp parallel for simd \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/if-2.c", "omp_pragma_line": "#pragma omp parallel for simd if (target update: a) ", "context_chars": 100, "text": ") /* { dg-error \"expected .parallel. .if. clause modifier rather than .target update.\" } */\n ;\n for (i = 0; i < 16; i++)\n ;\n #pragma omp task if (task)\n ;\n #pragma omp task if (task: task)\n ;\n #pragma omp task if (parallel: a) /* { dg-error \"expected .task. .if. clause modifier rather than .parallel.\" } #pragma omp parallel for simd if (target update: a) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/c-c++-common/gomp/sink-3.c", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": " multiple undeclared sink variables\n gracefully. */\n\nvoid bar (int *);\n\nvoid\nfoo ()\n{\n int i,j;\nfor (i=0; i < 100; ++i)\n {\n#pragma omp ordered depend(sink:poo-1,paa+1) /* { dg-error \"poo.*declared.*paa.*declared\" } */\n bar(&i);\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/warn/Wduplicated-branches3.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "openmp\" }\n// { dg-require-effective-target fopenmp }\n\ntemplate void foo()\n{\n if (N > 0)\n {\nfor (int i = 0; i < 10; ++i) ;\n }\n}\n\nvoid bar()\n{\n foo<0>();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr29965-4.C", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic)", "context_chars": 100, "text": "agma omp for schedule (dynamic)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 ()\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 ()\n{\n int i;\n#pragma omp parallel for schedule (dynamic)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr81154.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (foo)\t ", "context_chars": 100, "text": "}\n ;\n#pragma omp parallel firstprivate (foo)\t// { dg-error \"is not a variable in clause\" }\n ;\nfor (T i = 0; i < n; i++)\n ;\n#pragma omp parallel for linear (foo)\t// { dg-error \"is not a variable in clause\" } #pragma omp parallel for lastprivate (foo)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr81154.C", "omp_pragma_line": "#pragma omp parallel for linear (foo)\t ", "context_chars": 100, "text": "stprivate (foo)\t// { dg-error \"is not a variable in clause\" }\n for (T i = 0; i < n; i++)\n ;\nfor (T i = 0; i < n; i++)\n ;\n#pragma omp parallel reduction (+:foo)\t// { dg-error \"is not a variable in clause\" } #pragma omp parallel for linear (foo)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr81154.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (foo)\t ", "context_chars": 100, "text": "}\n ;\n#pragma omp parallel firstprivate (foo)\t// { dg-error 
\"is not a variable in clause\" }\n ;\nfor (T i = 0; i < n; i++)\n ;\n#pragma omp parallel for linear (foo)\t// { dg-error \"is not a variable in clause\" } #pragma omp parallel for lastprivate (foo)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr81154.C", "omp_pragma_line": "#pragma omp parallel for linear (foo)\t ", "context_chars": 100, "text": "stprivate (foo)\t// { dg-error \"is not a variable in clause\" }\n for (T i = 0; i < n; i++)\n ;\nfor (T i = 0; i < n; i++)\n ;\n#pragma omp parallel reduction (+:foo)\t// { dg-error \"is not a variable in clause\" } #pragma omp parallel for linear (foo)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/tpl-for-3.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// { dg-do compile }\n\nvoid foo(int);\n\ntemplate void bar()\n{\n for (typename T::T i = 0; i < T::N; ++i)\n foo(i);\n}\n\nstruct A\n{\n typedef int T;\n static T N;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr29965-5.C", "omp_pragma_line": "#pragma omp parallel for schedule (static)", "context_chars": 100, "text": "ragma omp for schedule (static)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 ()\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 ()\n{\n int i;\n#pragma omp parallel for schedule (static)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sink-1.C", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "/* { dg-do compile } */\n\nvoid bar (int, int, int);\n\ntemplate\nvoid baz ()\n{\n T i, j;\nfor (i=0; i < 100; ++i)\n for (j=0; j < 100; ++j)\n {\n#pragma omp ordered depend(sink:i-3,j)\n\tbar (i, j, 0);\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr38639.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "639\n// { dg-do compile { target c++11 } }\n// { dg-options \"-fopenmp\" }\n\ntemplate void\nfoo ()\n{\nfor (auto i = i = 0; i<4; ++i)\t// { dg-error \"initializer expression refers to iteration variable\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr58567.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* { dg-do compile } */\n\n/* PR c++/58567 - was ICEing before */\n\ntemplate void foo()\n{\n for (typename T::X i = 0; i < 100; ++i) /* { dg-error \"'int' is not a class, struct, or union type|expected iteration declaration or initialization\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr66571-2.C", "omp_pragma_line": "#pragma omp parallel for simd private (a) firstprivate (b) lastprivate (c) linear (d:2)", "context_chars": 100, "text": "\nextern void bar (T, T, T, T);\n\ntemplate \nvoid\nfoo (T a, T b, T c, T d)\n{\n for (int i = 0; i < 10; i++)\n bar (a, b, c, d), d += 2;\n #pragma omp parallel private (c)\n #pragma omp 
single copyprivate (c)\n bar (a, b, c, d);\n T e = a;\n T f = b;\n T g = c;\n T h = d;\n #pragma omp parallel for simd private (e) firstprivate (f) lastprivate (g) linear (h:2)\n for (int i = 0; i < 10; i++)\n bar (e, f, g, h), h += 2;\n #pragma omp parallel private (g)\n #pragma omp single copyprivate (g)\n bar (e, f, g, h);\n}\n\nvoid\nbaz ()\n{\n int a = 0, b = 0, c = 0, d = 0;\n foo (a, b, c, d);\n foo (a, b, c, d);\n} #pragma omp parallel for simd private (a) firstprivate (b) lastprivate (c) linear (d:2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr66571-2.C", "omp_pragma_line": "#pragma omp parallel for simd private (e) firstprivate (f) lastprivate (g) linear (h:2)", "context_chars": 100, "text": "omp single copyprivate (c)\n bar (a, b, c, d);\n T e = a;\n T f = b;\n T g = c;\n T h = d;\n for (int i = 0; i < 10; i++)\n bar (e, f, g, h), h += 2;\n #pragma omp parallel private (g)\n #pragma omp single copyprivate (g)\n bar (e, f, g, h);\n}\n\nvoid\nbaz ()\n{\n int a = 0, b = 0, c = 0, d = 0;\n foo (a, b, c, d);\n foo (a, b, c, d);\n} #pragma omp parallel for simd private (e) firstprivate (f) lastprivate (g) linear (h:2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr35158.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " PR c++/35158\n// { dg-do compile }\n// { dg-options \"-fopenmp\" }\n\nint main(int argc, char *argv[])\n{\nfor (int i(0) ; // { dg-error \"parenthesized initialization is not allowed in OpenMP 'for' loop\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/this-1.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (this)\t ", "context_chars": 100, "text": "rstprivate (this)\t// { dg-error \".this. allowed in OpenMP only in .declare simd. clauses\" }\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (this)\t\t// { dg-error \".this. allowed in OpenMP only in .declare simd. clauses\" } #pragma omp parallel for lastprivate (this)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/this-1.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (this)\t ", "context_chars": 100, "text": "rstprivate (this)\t// { dg-error \".this. allowed in OpenMP only in .declare simd. clauses\" }\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (this)\t\t// { dg-error \".this. allowed in OpenMP only in .declare simd. clauses\" } #pragma omp parallel for lastprivate (this)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/linear-2.C", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(1)", "context_chars": 100, "text": "lid for .#pragma omp distribute parallel for.\" }\n for (i = 0; i < 32; i++)\n ;\n}\n\nvoid\nf3 ()\n{\n for (i = 0; i < 32; i++)\t\t\t\t// { dg-error \"iteration variable .i. 
should not be linear\" } #pragma omp parallel for linear (i:1) collapse(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/linear-2.C", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t ", "context_chars": 100, "text": "parallel for.\" }\n for (i = 0; i < 32; i++)\n for (j = 0; j < 32; j+=2)\n ;\n}\n\nvoid\nf6 ()\n{\n for (i = 0; i < 32; i++)\t\t\t\t\t\t// { dg-error \"iteration variable .j. should not be linear\" } #pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/linear-2.C", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(1)", "context_chars": 100, "text": "omp distribute parallel for.\" }\n for (i = 0; i < 32; i++)\n ;\n}\n\ntemplate \nvoid\nf9 ()\n{\n for (i = 0; i < 32; i++)\t\t\t\t// { dg-error \"iteration variable .i. should not be linear\" } #pragma omp parallel for linear (i:1) collapse(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/linear-2.C", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t ", "context_chars": 100, "text": " for (i = 0; i < 32; i++)\n for (j = 0; j < 32; j+=2)\n ;\n}\n\ntemplate \nvoid\nf12 ()\n{\n for (i = 0; i < 32; i++)\t\t\t\t\t\t// { dg-error \"iteration variable .j. should not be linear\" } #pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr31769.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ");\n void foo (int);\n E f;\n};\n\nint\nmain ()\n{\n try\n {\n F f (10);\n F g (10);\n C h (0, 9);\nfor (int i = 0; i < 2; ++i)\n g += f (h);\n }\n catch (int &e)\n {\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr30558.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "bar (2); }\n F &operator () (F x);\n void bar (int);\n};\n\nint\nmain ()\n{\n try\n {\n G g;\nfor (int i = 0; i < 10; ++i)\n {\n\tF j (i);\n\tF f = g (j);\n\tF h = foo (f);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr79664.C", "omp_pragma_line": "#pragma omp parallel for\t\t\t ", "context_chars": 100, "text": "64\n// { dg-do compile }\n// { dg-options \"-std=c++14 -fopenmp\" }\n\nconstexpr int\nf1 ()\n{\n int i = 0;\nfor (i = 0; i < 10; ++i)\n ;\n return 0;\n}\n\nconstexpr int\nf2 ()\n{\n int i = 0;\n#pragma omp parallel\t\t\t\t// { dg-error \"is not a constant expression\" }\n i = 5;\n return 0;\n} #pragma omp parallel for\t\t\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/block-0.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i < 10; ++i)\n bar ();\n #pragma omp sections\n { bar(); }\n #pragma omp parallel\n bar ();\n for (int i = 0; i < 10; ++i)\n bar ();\n #pragma omp parallel sections\n {\n {\n\tbar ();\n\tbar ();\n }\n #pragma omp section\n bar ();\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr51360.C", "omp_pragma_line": "#pragma omp parallel for num_threads (m) if (n) schedule (static, o)", "context_chars": 100, "text": "T d)\n{\n T m, n, o, p, i;\n m = 6;\n n = 1;\n o = 5;\n p = 1;\n a = 6;\n b = 1;\n c = 5;\n d = 1;\n for (i = 0; i < 10; i++)\n ;\n #pragma omp parallel for num_threads (a) if (b) schedule (static, c)\n for (i = 0; i < 10; i++)\n ;\n #pragma omp task final (p)\n ;\n #pragma omp task final (d)\n ;\n}\n\nvoid\nbar ()\n{\n foo (0, 0, 0, 0);\n} #pragma omp parallel for num_threads (m) if (n) schedule (static, o)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr51360.C", "omp_pragma_line": "#pragma omp parallel for num_threads (a) if (b) schedule (static, c)", "context_chars": 100, "text": "gma omp parallel for num_threads (m) if (n) schedule (static, o)\n for (i = 0; i < 10; i++)\n ;\n for (i = 0; i < 10; i++)\n ;\n #pragma omp task final (p)\n ;\n #pragma omp task final (d)\n ;\n}\n\nvoid\nbar ()\n{\n foo (0, 0, 0, 0);\n} #pragma omp parallel for num_threads (a) if (b) schedule (static, c)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr33372-1.C", "omp_pragma_line": "#pragma omp parallel for schedule(static, n)", "context_chars": 100, "text": "#pragma omp parallel num_threads(n)\t// { dg-error \"'num_threads' expression must be integral\" }\n ;\nfor (int i = 0; i < 10; i++)\t\t// { dg-error \"chunk size expression must be integral\" } #pragma omp parallel for schedule(static, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr67523.C", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "omp for simd\n for (T i = x; i < y; i++)\t// { dg-error \"used with class iteration variable\" }\n ;\nfor (T i = x; i < y; i++)\t// { dg-error \"used with class iteration variable\" } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sink-2.C", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": ":vector v;\n for (int i=1; i<=5; i++) v.push_back(i);\n\n std::vector::const_iterator it;\n\nfor (it = v.begin(); it < v.end(); ++it)\n {\n#pragma omp ordered depend(sink:it-1)\n std::cout << *it << '\\n';\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sink-2.C", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "::vector v;\n for (int i=1; i<=5; i++) v.push_back(i);\n\n std::vector::const_iterator it;\nfor (it = v.begin(); it < v.end(); ++it)\n {\n#pragma omp ordered depend(sink:it-1)\n std::cout << *it << '\\n';\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sink-2.C", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "tor v;\n for (int i=1; i<=5; i++) v.push_back(i);\n\n typename std::vector::const_iterator it;\nfor (it = v.begin(); it < v.end(); ++it)\n {\n#pragma omp ordered depend(sink:it-1)\n 
std::cout << *it << '\\n';\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr48632.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "do compile }\n// { dg-options \"-fopenmp\" }\n\ntemplate\nvoid\nfoo (T *x, T *y, unsigned z)\n{\nfor (T *p = x; p < y; p += z)\n ;\n#pragma omp parallel for\n for (T *p = y; p > x; p -= z)\n ;\n}\n\nint\nmain ()\n{\n char buf[10];\n foo (&buf[0], &buf[9], 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr48632.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\nvoid\nfoo (T *x, T *y, unsigned z)\n{\n#pragma omp parallel for\n for (T *p = x; p < y; p += z)\n ;\nfor (T *p = y; p > x; p -= z)\n ;\n}\n\nint\nmain ()\n{\n char buf[10];\n foo (&buf[0], &buf[9], 1);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr32177.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "sophia.inria.fr>\n\nstruct A\n{\n A () {}\n ~A () {}\n int s () const { return 1; }\n};\n\nvoid\nf1 ()\n{\n for (int i = 1; i <= A ().s (); ++i)\n ;\n}\n\nvoid\nf2 ()\n{\n #pragma omp parallel for\n for (int i = A ().s (); i <= 20; ++i)\n ;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr32177.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ()\n{\n #pragma omp parallel for\n for (int i = 1; i <= A ().s (); ++i)\n ;\n}\n\nvoid\nf2 ()\n{\n for (int i = A ().s (); i <= 20; ++i)\n ;\n}\n\nvoid\nf3 ()\n{\n #pragma omp parallel for\n for (int i = 1; i <= 20; i += A ().s ())\n ;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr32177.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "()\n{\n #pragma omp parallel for\n for (int i = A ().s (); i <= 20; ++i)\n ;\n}\n\nvoid\nf3 ()\n{\n for (int i = 1; i <= 20; i += A ().s ())\n ;\n}\n\nvoid\nf4 ()\n{\n int i;\n #pragma omp parallel for\n for (i = A ().s (); i <= 20; i++)\n ;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr33333.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d/33333\n// { dg-do compile }\n\nstruct A\n{\n int n;\n void foo ();\n};\n\nvoid\nA::foo ()\n{\n try\n {\n for (int i = 0; i < n; ++i)\n\t;\n } catch (...) 
{} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/block-11.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/* PR c++/24516 */\n/* { dg-do compile } */\n\nvoid\nbar (int *p)\n{\n int m;\nfor (m = 0; m < 1000; ++m)\n switch (p[m])\n {\n case 1:\n\tp[m] = 2;\n\tbreak;\n default:\n\tp[m] = 3;\n\tbreak;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr37533.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// PR c++/37533\n// { dg-do compile }\n// { dg-options \"-fopenmp\" }\n\ntemplate\nvoid\nf1 ()\n{\nfor (int i = \"\"; i < 4; ++i)\t// { dg-error \"invalid conversion from\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr37533.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i < 4; ++i)\t// { dg-error \"invalid conversion from\" }\n ;\n}\n\ntemplate\nvoid\nf2 ()\n{\n int i;\nfor (i = \"\"; i < 4; ++i)\t// { dg-error \"invalid conversion from\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr37533.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\"; i < 4; ++i)\t// { dg-error \"invalid conversion from\" }\n ;\n}\n\ntemplate\nvoid\nf3 ()\n{\nfor (T i = \"\"; i < 4; ++i)\t// { dg-error \"invalid conversion from\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr37533.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "4; ++i)\t// { dg-error \"invalid conversion from\" }\n ;\n}\n\ntemplate\nvoid\nf4 ()\n{\n T i;\nfor (i = \"\"; i < 4; ++i)\t// { dg-error \"invalid conversion from\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-13.C", "omp_pragma_line": "#pragma omp parallel for default(none)", "context_chars": 100, "text": " }\n// { dg-options \"-fopenmp -fdump-tree-lower\" }\n\nextern void bar(int);\nvoid foo(void)\n{\n int i;\n\nfor (i = 0; i < 10; i++)\n bar(i);\n}\n\n// { dg-final { scan-tree-dump-times \"omp_data_o\" 0 \"lower\" } } #pragma omp parallel for default(none)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr66571-1.C", "omp_pragma_line": "#pragma omp parallel for simd shared (a, c) reduction (+:b)", "context_chars": 100, "text": "ename U>\nextern void bar (T, T, U);\n\ntemplate \nvoid\nfoo (T a, T b, U c)\n{\n for (int i = 0; i < 10; i++)\n bar (a, b, c);\n #pragma omp target map(tofrom:a, c[0:5])\n ;\n #pragma omp task depend(inout:c[4:2])\n ;\n T d = a;\n T e = b;\n U f = c;\n #pragma omp parallel for simd shared (d, f) reduction (+:e)\n for (int i = 0; i < 10; i++)\n bar (d, e, f);\n #pragma omp target map(tofrom:d, f[0:5])\n ;\n #pragma omp task depend(inout:f[4:2])\n ;\n}\n\nvoid\nbaz ()\n{\n int a = 0, b = 0, cb[10] = {}, *c = cb;\n foo (a, b, c);\n foo (a, b, c);\n} #pragma omp parallel for simd shared (a, c) reduction (+:b)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr66571-1.C", "omp_pragma_line": "#pragma omp parallel for simd shared (d, f) reduction (+:e)", "context_chars": 100, "text": "m:a, c[0:5])\n ;\n #pragma omp task depend(inout:c[4:2])\n ;\n T d = a;\n T e = b;\n U f = c;\n for (int i = 0; i < 10; i++)\n bar (d, e, f);\n #pragma omp target map(tofrom:d, f[0:5])\n ;\n #pragma omp task depend(inout:f[4:2])\n ;\n}\n\nvoid\nbaz ()\n{\n int a = 0, b = 0, cb[10] = {}, *c = cb;\n foo (a, b, c);\n foo (a, b, c);\n} #pragma omp parallel for simd shared (d, f) reduction (+:e)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-20.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "do compile }\n// { dg-options -fopenmp }\n\nint foo() { return 0; }\n\ntemplate void bar()\n{\nfor (T i = foo(); i < 8; ++i) {} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (a, b, c, e, f, g)", "context_chars": 100, "text": "lel private (a, b, c, e, f, g)\n ;\n #pragma omp parallel firstprivate (a, b, c, e, f, g)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (a, b, c : 1)\n for (int i = 0; i < 10; i++)\n {\n\ta++;\n\tb++;\n\tc++;\n } #pragma omp parallel for lastprivate (a, b, c, e, f, g)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:a, b, c, e, f)", "context_chars": 100, "text": " omp simd linear (a, b, c : 1)\n for (int i = 0; i < 10; i++)\n {\n\ta++;\n\tb++;\n\tc++;\n }\n for (int i = 0; i < 10; i++)\n ;\n return 0;\n}\n\nint\nB::m2 ()\n{\n #pragma omp parallel private (h)\t// { dg-error \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (h)\n ;\n #pragma omp parallel for lastprivate (h)\t// { dg-error \"is predetermined .shared. for .lastprivate.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (h : 1)\t// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:h)\t// { dg-error \"is predetermined .shared. for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:g)\t// { dg-error \"has const type for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (a)\n ;\n #pragma omp parallel shared (b)\n ;\n #pragma omp parallel shared (c)\n ;\n #pragma omp parallel shared (e)\n ;\n #pragma omp parallel shared (f)\n ;\n #pragma omp parallel shared (g)\n ;\n #pragma omp parallel shared (h)\t// { dg-error \"is predetermined .shared. for .shared.\" }\n ;\n return 0;\n} #pragma omp parallel for reduction (+:a, b, c, e, f)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (h)\t ", "context_chars": 100, "text": "r \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (h)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (h : 1)\t// { dg-error \"is predetermined .shared. 
for .linear.\" } #pragma omp parallel for lastprivate (h)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:h)\t ", "context_chars": 100, "text": "// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:g)\t// { dg-error \"has const type for .reduction.\" } #pragma omp parallel for reduction (+:h)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:g)\t ", "context_chars": 100, "text": "{ dg-error \"is predetermined .shared. for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (a)\n ;\n #pragma omp parallel shared (b)\n ;\n #pragma omp parallel shared (c)\n ;\n #pragma omp parallel shared (e)\n ;\n #pragma omp parallel shared (f)\n ;\n #pragma omp parallel shared (g)\n ;\n #pragma omp parallel shared (h)\t// { dg-error \"is predetermined .shared. for .shared.\" } #pragma omp parallel for reduction (+:g)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (b, c, e, f, g)", "context_chars": 100, "text": " parallel private (b, c, e, f, g)\n ;\n #pragma omp parallel firstprivate (b, c, e, f, g)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (b, c : 1)\n for (int i = 0; i < 10; i++)\n {\n\tb++;\n\tc++;\n } #pragma omp parallel for lastprivate (b, c, e, f, g)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:b, c, f)", "context_chars": 100, "text": " #pragma omp simd linear (b, c : 1)\n for (int i = 0; i < 10; i++)\n {\n\tb++;\n\tc++;\n }\n for (int i = 0; i < 10; i++)\n ;\n return 0;\n}\n\nint\nB::m4 () const\n{\n #pragma omp parallel private (a)\t// { dg-error \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (a)\n ;\n #pragma omp parallel for lastprivate (a)\t// { dg-error \"is predetermined .shared. for .lastprivate.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (a : 1)\t// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:a)\t// { dg-error \"is predetermined .shared. for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel private (h)\t// { dg-error \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (h)\n ;\n #pragma omp parallel for lastprivate (h)\t// { dg-error \"is predetermined .shared. for .lastprivate.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (h : 1)\t// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:h)\t// { dg-error \"is predetermined .shared. 
for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:e)\t// { dg-error \"has const type for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:g)\t// { dg-error \"has const type for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (a)\t// { dg-error \"is predetermined .shared. for .shared.\" }\n ;\n #pragma omp parallel shared (b)\n ;\n #pragma omp parallel shared (c)\n ;\n #pragma omp parallel shared (e)\n ;\n #pragma omp parallel shared (f)\n ;\n #pragma omp parallel shared (g)\n ;\n #pragma omp parallel shared (h)\t// { dg-error \"is predetermined .shared. for .shared.\" }\n ;\n return 0;\n} #pragma omp parallel for reduction (+:b, c, f)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (a)\t ", "context_chars": 100, "text": "r \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (a)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (a : 1)\t// { dg-error \"is predetermined .shared. for .linear.\" } #pragma omp parallel for lastprivate (a)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:a)\t ", "context_chars": 100, "text": "// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel private (h)\t// { dg-error \"is predetermined .shared. for .private.\" } #pragma omp parallel for reduction (+:a)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for lastprivate (h)\t ", "context_chars": 100, "text": "r \"is predetermined .shared. for .private.\" }\n ;\n #pragma omp parallel firstprivate (h)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp simd linear (h : 1)\t// { dg-error \"is predetermined .shared. for .linear.\" } #pragma omp parallel for lastprivate (h)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:h)\t ", "context_chars": 100, "text": "// { dg-error \"is predetermined .shared. for .linear.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:e)\t// { dg-error \"has const type for .reduction.\" } #pragma omp parallel for reduction (+:h)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:e)\t ", "context_chars": 100, "text": "{ dg-error \"is predetermined .shared. 
for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel for reduction (+:g)\t// { dg-error \"has const type for .reduction.\" } #pragma omp parallel for reduction (+:e)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/member-2.C", "omp_pragma_line": "#pragma omp parallel for reduction (+:g)\t ", "context_chars": 100, "text": "n (+:e)\t// { dg-error \"has const type for .reduction.\" }\n for (int i = 0; i < 10; i++)\n ;\n for (int i = 0; i < 10; i++)\n ;\n #pragma omp parallel shared (a)\t// { dg-error \"is predetermined .shared. for .shared.\" } #pragma omp parallel for reduction (+:g)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sharing-1.C", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": " static int locvar;\n static int *p;\n int i, j, s, l;\n\n p = new int;\n *p = 7;\n s = 6;\n l = 0;\ndefault (none) private (p) shared (s) \n for (i = 0; i < 64; i++)\n {\n int k = foo (0);\t/* Predetermined - private (automatic var declared */\n k++;\t\t/* in scope of construct). */\n thrglobalvar++;\t/* Predetermined - threadprivate. */\n thrlocvar++;\t/* Predetermined - threadprivate. */\n foo (i);\t\t/* Predetermined - private (omp for loop variable). */\n foo (constvar.x);\t/* Predetermined - shared (const qualified type). */\n foo (T::t.i);\t/* Predetermined - shared (static data member). */\n foo (*p);\t\t/* *p predetermined - shared (heap allocated */\n (*p)++;\t\t/* storage). */\n bar (p);\t\t/* Explicitly determined - private. */\n foo (s);\t\t/* Explicitly determined - shared. */\n globalvar++;\t/* { dg-error \"not specified in\" } */\n locvar++;\t\t/* { dg-error \"not specified in\" } */\n l++;\t\t/* { dg-error \"not specified in\" } */\n for (j = 0; j < 2; j++); /* { dg-error \"not specified in\" } */\n baz (constmutvar);/* { dg-error \"not specified in\" } */\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr27415.C", "omp_pragma_line": "#pragma omp parallel for firstprivate (i)", "context_chars": 100, "text": "\"should not be firstprivate\" }\n for (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest2 (void)\n{\n int i = 0;\nfor (i = 0; i < 10; i++)\t\t\t// { dg-error \"should not be firstprivate\" } #pragma omp parallel for firstprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr27415.C", "omp_pragma_line": "#pragma omp parallel for reduction (*:i)", "context_chars": 100, "text": "or \"should not be reduction\" }\n for (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest4 (void)\n{\n int i = 0;\nfor (i = 0; i < 10; i++)\t\t\t// { dg-error \"should not be reduction\" } #pragma omp parallel for reduction (*:i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/openmp-simd-2.C", "omp_pragma_line": "#pragma omp parallel for simd num_threads(4) safelen(64)", "context_chars": 100, "text": "!= s.s || u != s.s)\n abort ();\n return s.s;\n}\n\n\nvoid bar(int n, float *a, float *b)\n{\n int i; \nfor (i = 0; i < n ; i++)\n a[i] = b[i];\n}\n\n/* { dg-final { scan-tree-dump-times \"Function void omp declare reduction operator\\\\+\" 1 \"original\" } } #pragma omp parallel for simd num_threads(4) safelen(64)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr33372-2.C", "omp_pragma_line": "#pragma omp parallel for schedule(static, n)", "context_chars": 100, "text": " \"-fopenmp\" }\n\ntemplate \nvoid f ()\n{\n T n = 6;\n#pragma omp parallel num_threads(n)\n ;\nfor (int i = 0; i < 10; i++)\n ;\n}\n\nvoid g ()\n{\n f ();\n} #pragma omp parallel for schedule(static, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr35078.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// PR c++/35078\n// { dg-do compile }\n// { dg-options \"-fopenmp\" }\n\ntemplate void\nfoo ()\n{\nfor (int& i = 0; i < 10; ++i)\t// { dg-error \"invalid type for iteration variable\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr35078.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 10; ++i)\t// { dg-error \"invalid type for iteration variable\" }\n ;\n}\n\nvoid\nbar ()\n{\n int j = 0;\nfor (int& i = j; i < 10; ++i)\t// { dg-error \"invalid type for iteration variable\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr33372-3.C", "omp_pragma_line": "#pragma omp parallel for schedule(static, n)", "context_chars": 100, "text": "#pragma omp parallel num_threads(n)\t// { dg-error \"'num_threads' expression must be integral\" }\n ;\nfor (int i = 0; i < 10; i++)\t\t// { dg-error \"chunk size expression must be integral\" } #pragma omp parallel for schedule(static, n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr27359.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// PR c++/27359\n// { dg-do compile }\n\nvoid\nfoo ()\n{\nfor (int i; i < 1; ++i)\t// { dg-error \"expected|was not declared\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr31598.C", "omp_pragma_line": "#pragma omp parallel for firstprivate(b) schedule(guided)", "context_chars": 100, "text": "uble E; };\n\ntemplate \nstruct R\n{\n R()\n {\n typedef A B;\n B b;\n for (int t = 0; t < 10; ++t)\n\t;\n }\n};\n\ntemplate \nstruct S\n{\n S()\n {\n typedef C B;\n B b;\n #pragma omp parallel for firstprivate(b)\n for (int t = 0; t < 10; ++t)\n\t;\n }\n} #pragma omp parallel for firstprivate(b) schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr31598.C", "omp_pragma_line": "#pragma omp parallel for firstprivate(b)", "context_chars": 100, "text": "\n\t;\n }\n};\n\ntemplate \nstruct S\n{\n S()\n {\n typedef C B;\n B b;\n for (int t = 0; t < 10; ++t)\n\t;\n }\n};\n\nstruct U\n{\n U()\n {\n D b;\n #pragma omp parallel for firstprivate(b)\n for (int t = 0; t < 10; ++t)\n\t;\n }\n} #pragma omp parallel for firstprivate(b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr31598.C", "omp_pragma_line": "#pragma omp parallel for firstprivate(b)", "context_chars": 100, "text": "for firstprivate(b)\n for (int t = 0; t < 10; ++t)\n\t;\n }\n};\n\nstruct U\n{\n U()\n {\n D b;\n for (int t 
= 0; t < 10; ++t)\n\t;\n }\n};\n\nint\nmain ()\n{\n R r;\n S s;\n U u;\n return 0;\n} #pragma omp parallel for firstprivate(b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/sink-3.C", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "uct {\n char stuff[400];\n} foo;\n\nfoo *end, *begin, *p;\n\ntemplate\nvoid\nfunk ()\n{\n int i,j;\nfor (p=end; p > begin; p--)\n {\n#pragma omp ordered depend(sink:p+1)\n void bar ();\n bar();\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr29965-6.C", "omp_pragma_line": "#pragma omp parallel for schedule (static, 16)", "context_chars": 100, "text": "a omp for schedule (static, 16)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 ()\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 ()\n{\n int i;\n#pragma omp parallel for schedule (static, 16)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (static, 16)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/openmp-simd-1.C", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": " a[i] = b[i];\n#pragma omp distribute parallel for simd\n for (i = 0; i < n ; i++)\n a[i] = b[i];\nfor (i = 0; i < n ; i++)\n a[i] = b[i];\n#pragma omp teams distribute simd\n for (i = 0; i < n ; i++)\n a[i] = b[i];\n#pragma omp target teams distribute simd\n for (i = 0; i < n ; i++)\n a[i] = b[i];\n#pragma omp teams distribute parallel for simd\n for (i = 0; i < n ; i++)\n a[i] = b[i];\n#pragma omp target teams distribute parallel for simd\n for (i = 0; i < n ; i++)\n a[i] = b[i];\n}\n\n/* { dg-final { scan-tree-dump-times \"pragma omp simd\" 9 \"original\" } } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/pr78363-1.C", "omp_pragma_line": "#pragma omp parallel for reduction (+: n)", "context_chars": 100, "text": "{ dg-require-effective-target c++11 }\n// { dg-additional-options \"-g\" }\n\nint main()\n{\n int n = 0;\n\nfor (int i = [](){ return 3; } #pragma omp parallel for reduction (+: n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/method-1.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/* PR c++/24513 */\n/* { dg-do compile } */\n\nstruct S\n{\n void foo (int *p)\n {\nfor (int i = 0; i < 1000; ++i)\n p[i]=0;\n }\n void bar ()\n {\n#pragma omp master\n j = 2;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/block-8.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// { dg-do compile }\n// PR 24451\n\nint foo()\n{\n int i;\n\n for (i = 0; i < 10; ++i)\n return 0;\t\t\t// { dg-error \"invalid exit\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-15.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++);\n\n#pragma omp for\n for (T j = 0; j < 10; j++);\n\n#pragma omp for\n for (T j = 0; j < 10; j++);\n\nfor (int k = 0; k < 
10; k++);\n\n#pragma omp parallel for\n for (int k = 0; k < 10; k++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n}\n\nvoid bar ()\n{\n foo ();\n foo ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-15.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ma omp for\n for (T j = 0; j < 10; j++);\n\n#pragma omp parallel for\n for (int k = 0; k < 10; k++);\n\nfor (int k = 0; k < 10; k++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n}\n\nvoid bar ()\n{\n foo ();\n foo ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-15.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "llel for\n for (int k = 0; k < 10; k++);\n\n#pragma omp parallel for\n for (int k = 0; k < 10; k++);\n\nfor (T l = 0; l < 10; l++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n}\n\nvoid bar ()\n{\n foo ();\n foo ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/g++.dg/gomp/for-15.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rallel for\n for (int k = 0; k < 10; k++);\n\n#pragma omp parallel for\n for (T l = 0; l < 10; l++);\n\nfor (T l = 0; l < 10; l++);\n}\n\nvoid bar ()\n{\n foo ();\n foo ();\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr29965-4.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic)", "context_chars": 100, "text": " omp for schedule (dynamic)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 (void)\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 (void)\n{\n int i;\n#pragma omp parallel for schedule (dynamic)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr25990.c", "omp_pragma_line": "#pragma omp parallel for private(m,liter,s)", "context_chars": 100, "text": "s)-1)*(Ps_slice_stride)) ) ] = 0.0 + 0.0 * (__extension__ 1.0iF);\n\t }\n }\n }\n\n\n int liter ;\n\nfor ( n = 1 ; n <= n_max ; n++) {\n for ( m = 1 ; m <= m_max ; m++) {\n\t for ( liter = 1 ; liter <= KK ; liter++ ) {\n\t for ( s = 1 ; s <= num_s ; s++) {\n\n\n\t\t int LM_column;\n\t\t float NN[4];\n\t\t float OO[4];\n\t\t float PP[4];\n\t\t float QQ[4];\n\t\t float k;\n\t\t int s_index;\n\t\t float RR;\n\n\t\t s_index = s + (DD -1);\n\t\t RR = f[ ( (s_index)-1) ];\n\t\t k = 99.0;\n\n\t\t NN[1 -1] = X[ ( ((m)-1) + ( ((n)-1)*m_max ))];\n\t\t NN[2 -1] = Y[ ( ((m)-1) + ( ((n)-1)*m_max ))];\n\t\t NN[3 -1] = Z[ ( ((m)-1) + ( ((n)-1)*m_max ))];\n\t\t NN[4 -1] = 1.0;\n\n\t\t LM_column = ((liter -1) * 3) + 1;\n\t\t OO[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];\n\t\t OO[2 -1] = MM[ ( ((2)-1) + ( ((LM_column)-1)*4 ))];\n\t\t OO[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];\n\t\t OO[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];\n\n\t\t LM_column = ((liter -1) * 3) + 2;\n\t\t PP[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];\n\t\t PP[2 -1] = MM[ ( ((2)-1) + ( 
((LM_column)-1)*4 ))];\n\t\t PP[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];\n\t\t PP[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];\n\n\t\t LM_column = ((liter -1) * 3) + 3;\n\t\t QQ[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];\n\t\t QQ[2 -1] = MM[ ( ((2)-1) + ( ((LM_column)-1)*4 ))];\n\t\t QQ[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];\n\t\t QQ[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];\n\n\t }\n\t }\n }\n } #pragma omp parallel for private(m,liter,s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr25990.c", "omp_pragma_line": "#pragma omp parallel for private(m)", "context_chars": 100, "text": "umn)-1)*4 ))];\n\t\t QQ[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];\n\n\t }\n\t }\n }\n }\n\n\nfor ( n = 1 ; n <= n_max ; n++) {\n for ( m = 1 ; m <= m_max ; m++) {\n\n\n\n\t int s;\n\t float SSS;\n\t float f1,f2,p1,p2,TT,h,FFF;\n\t SSS = 0.0;\n\t for ( s = 2 ; s <= num_s ; s++) {\n\t f1 = f[ ( ((s-1) + (DD - 1))-1) ];\n\t f2 = f[ ( ((s) + (DD - 1))-1) ];\n\t p1 = cabsf(Ps[ ( ((m)-1) + (((n)-1)*(m_max)) + ((((s-1))-1)*(Ps_slice_stride)) ) ]) ;\n\t p2 = cabsf(Ps[ ( ((m)-1) + (((n)-1)*(m_max)) + (((s)-1)*(Ps_slice_stride)) ) ]) ;\n\n\t h = f2 - f1;\n\n\t FFF = (f1 + f2) / 2.0;\n\n\n\t TT = (1.0 / sqrtf(2.0)) * (((h * p1) + (0.5 * h * (p2 - p1))) * (1.0 / FFF));\n\n\t SSS += TT;\n\n\t }\n\n\t P[ ( ((m)-1) + ( ((n)-1)*m_max ))] = SSS + ((__extension__ 1.0iF) * 0.0);\n }\n } #pragma omp parallel for private(m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr29965-5.c", "omp_pragma_line": "#pragma omp parallel for schedule (static)", "context_chars": 100, "text": "a omp for schedule (static)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 (void)\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 (void)\n{\n int i;\n#pragma omp parallel for schedule (static)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr66820.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n/* { dg-do compile } */\n/* { dg-options \"-fopenmp\" } */\n\nvoid bar (char *);\n\nvoid\nfoo (char **x)\n{\nfor (int i = 0; i < 16; i++)\n {\n char y[50];\n __builtin_strcpy (y, x[i]);\n __builtin_strcat (y, \"foo\");\n bar (y);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-19.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)", "context_chars": 100, "text": "tions \"-O -fopenmp -fdump-tree-gimple\" } */\n\nvoid foo (int *a, int i, int j, int k, int l, int m)\n{\nfor (j = 0; j <= (6 * l + 4 * k); j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)\n for (j = m; j <= l; j += (k + l - m))\n a[j] = 1;\n}\n\n/* { dg-final { scan-tree-dump-times \"shared\\\\(a\\\\)\" 2 \"gimple\" } } #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-19.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)", "context_chars": 100, "text": "um_threads (3 * i) schedule (dynamic, i * 4)\n for 
(j = 0; j <= (6 * l + 4 * k); j++)\n a[j] = 1;\nfor (j = m; j <= l; j += (k + l - m))\n a[j] = 1;\n}\n\n/* { dg-final { scan-tree-dump-times \"shared\\\\(a\\\\)\" 2 \"gimple\" } } #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-2.c", "omp_pragma_line": "#pragma omp parallel for\t ", "context_chars": 100, "text": "a omp for\t\t/* { dg-error \"'_Atomic' iteration variable 'i'\" } */\n for (i = 0; i < 64; i++)\n ;\n for (i = 0; i < 64; i++)\n ;\n #pragma omp simd\t\t/* { dg-error \"'_Atomic' iteration variable 'i'\" } #pragma omp parallel for\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-2.c", "omp_pragma_line": "#pragma omp parallel for simd\t ", "context_chars": 100, "text": " omp simd\t\t/* { dg-error \"'_Atomic' iteration variable 'i'\" } */\n for (i = 0; i < 64; i++)\n ;\n for (i = 0; i < 64; i++)\n ;\n #pragma omp for simd\t\t/* { dg-error \"'_Atomic' iteration variable 'i'\" } #pragma omp parallel for simd\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-2.c", "omp_pragma_line": "#pragma omp parallel for\t ", "context_chars": 100, "text": "* { dg-error \"'_Atomic' iteration variable 'j'\" } */\n for (_Atomic int j = 0; j < 64; j++)\n ;\n for (_Atomic int j = 0; j < 64; j++)\n ;\n #pragma omp simd\t\t/* { dg-error \"'_Atomic' iteration variable 'j'\" } #pragma omp parallel for\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-2.c", "omp_pragma_line": "#pragma omp parallel for simd\t ", "context_chars": 100, "text": "* { dg-error \"'_Atomic' iteration variable 'j'\" } */\n for (_Atomic int j = 0; j < 64; j++)\n ;\n for (_Atomic int j = 0; j < 64; j++)\n ;\n #pragma omp for simd\t\t/* { dg-error \"'_Atomic' iteration variable 'j'\" } #pragma omp parallel for simd\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/combined-1.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime)", "context_chars": 100, "text": "/\n/* { dg-options \"-O1 -fopenmp -fdump-tree-optimized\" } */\n\nint a[10];\nint foo (void)\n{\n int i;\nfor (i = 0; i < 10; i++)\n a[i] = i;\n#pragma omp parallel\n#pragma omp for schedule(runtime)\n for (i = 0; i < 10; i++)\n a[i] = 10 - i;\n#pragma omp parallel\n {\n#pragma omp for schedule(runtime)\n\tfor (i = 0; i < 10; i++)\n\t a[i] = i;\n } #pragma omp parallel for schedule(runtime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)", "context_chars": 100, "text": "ns \"-O -fopenmp -fdump-tree-ompexp\" } */\n\nvoid\nfoo (int *a, int i)\n{\n int j, k = 1, l = 30, m = 4;\nfor (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\nvoid\nbar (int *a, int i)\n{\n int j, k = 1, l = 30, m = 
4;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n} #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)", "context_chars": 100, "text": "parallel for num_threads (3 * i) schedule (dynamic, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\nfor (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\nvoid\nbar (int *a, int i)\n{\n int j, k = 1, l = 30, m = 4;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n} #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)", "context_chars": 100, "text": "for num_threads (3 * i) schedule (dynamic, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\nfor (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\nvoid\nbar (int *a, int i)\n{\n int j, k = 1, l = 30, m = 4;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n} #pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)", "context_chars": 100, "text": "omp parallel for num_threads (3 * i) schedule (dynamic, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\nfor (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\nvoid\nbar (int *a, int i)\n{\n int j, k = 1, l = 30, m = 4;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp 
parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n} #pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)", "context_chars": 100, "text": "k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\nvoid\nbar (int *a, int i)\n{\n int j, k = 1, l = 30, m = 4;\nfor (j = 0; j <= l; j++)\n a[j] = 1;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\n/* { dg-final { scan-tree-dump-times \"GOMP_parallel_loop_dynamic_start\" 4 \"ompexp\" { xfail *-*-* } } } #pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)", "context_chars": 100, "text": " parallel for num_threads (3 * i) schedule (guided, i * 4)\n for (j = 0; j <= l; j++)\n a[j] = 1;\nfor (j = k; j <= l; j += (m - 1))\n a[j] = 2;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\n/* { dg-final { scan-tree-dump-times \"GOMP_parallel_loop_dynamic_start\" 4 \"ompexp\" { xfail *-*-* } } } #pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)", "context_chars": 100, "text": " for num_threads (3 * i) schedule (guided, i * 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 2;\nfor (j = 0; j <= l; j++)\n a[j] = 3;\n#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\n/* { dg-final { scan-tree-dump-times \"GOMP_parallel_loop_dynamic_start\" 4 \"ompexp\" { xfail *-*-* } } } #pragma omp parallel for num_threads (3 * i) schedule (guided, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-18.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)", "context_chars": 100, "text": " omp parallel for num_threads (3 * i) schedule (guided, 4)\n for (j = 0; j <= l; j++)\n a[j] = 3;\nfor (j = k; j <= l; j += (m - 1))\n a[j] = 4;\n}\n\n/* { dg-final { scan-tree-dump-times \"GOMP_parallel_loop_dynamic_start\" 4 \"ompexp\" { xfail *-*-* } } } #pragma omp parallel for num_threads (3 * i) schedule (guided, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sink-fold-3.c", "omp_pragma_line": "#pragma omp parallel for ordered(1)", "context_chars": 100, "text": "es. 
*/\n\ntypedef struct {\n char stuff[400];\n} foo;\n\nvoid\nfunk (foo *begin, foo *end)\n{\n foo *p;\nfor (p=end; p > begin; p--)\n {\n#pragma omp ordered depend(sink:p+2) depend(sink:p+4)\n void bar ();\n bar();\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(1)\t ", "context_chars": 100, "text": " .#pragma omp distribute parallel for.\" } */\n for (i = 0; i < 32; i++)\n ;\n}\n\nvoid\nf3 (void)\n{\n for (i = 0; i < 32; i++)\n ;\n}\n\nvoid\nf4 (void)\n{\n #pragma omp for linear (i:1) linear (j:2) collapse(2)\t/* { dg-error \"iteration variable .i. should not be linear\" } */\n for (i = 0; i < 32; i++)\t\t\t\t/* { dg-error \"iteration variable .j. should not be linear\" \"\" { target *-*-* } .-1 } */\n for (j = 0; j < 32; j+=2)\n ;\n} #pragma omp parallel for linear (i:1) collapse(1)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/linear-1.c", "omp_pragma_line": "#pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t ", "context_chars": 100, "text": "l for.\" } */\n for (i = 0; i < 32; i++)\n for (j = 0; j < 32; j+=2)\n ;\n}\n\nvoid\nf6 (void)\n{\n for (i = 0; i < 32; i++)\t\t\t\t\t\t/* { dg-error \"iteration variable .j. should not be linear\" \"\" { target *-*-* } .-1 } #pragma omp parallel for linear (i:1) collapse(2) linear (j:2)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr46032-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\nint\nfoo (void)\n{\n int a[N], b[N], c[N];\n int *ap = &a[0];\n int *bp = &b[0];\n int *cp = &c[0];\n\nfor (unsigned int idx = 0; idx < N; idx++)\n {\n ap[idx] = 1;\n bp[idx] = 2;\n cp[idx] = ap[idx];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27388-1.c", "omp_pragma_line": "#pragma omp parallel for firstprivate (n)", "context_chars": 100, "text": "e-omplower\" } */\n\nint n, o;\n\nvoid\nfoo (void)\n{\n#pragma omp parallel firstprivate (n)\n {\n int i;\nfor (i = 0; i < 10; i++)\n ++n;\n#pragma omp atomic\n o += n;\n }\n}\n\n/* { dg-final { scan-tree-dump-times \"shared\\\\\\(i\\\\\\)\" 0 \"omplower\" } } #pragma omp parallel for firstprivate (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-24.c", "omp_pragma_line": "#pragma omp parallel for schedule (nonmonotonic : dynamic, 4)", "context_chars": 100, "text": "g-options \"-O2 -fopenmp -fdump-tree-ssa\" } */\n\nextern void bar(int);\n\nvoid foo (void)\n{\n int i;\n\n for (i = 0; i < 37; ++i)\n bar(i);\n}\n\n/* { dg-final { scan-tree-dump-times \"GOMP_parallel_loop_nonmonotonic_dynamic\" 1 \"ssa\" } } #pragma omp parallel for schedule (nonmonotonic : dynamic, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr38676.c", "omp_pragma_line": "#pragma omp parallel for shared(foo)", "context_chars": 100, "text": "38676 */\n/* { dg-do compile } */\n/* { dg-options \"-fopenmp\" } */\n\nint\nmain ()\n{\n int bar, foo = 1;\nfor (bar = 0; bar < 3; bar++)\n {\n switch (foo)\n\t{\n\tcase 1:\n\t break;\n\t}\n } #pragma omp 
parallel for shared(foo)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sink-fold-2.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "/* { dg-do compile } */\n\nint i,j, N;\n\nextern void bar();\n\nvoid\nfunk ()\n{\nfor (i=0; i < N; i += 3)\n for (j=0; j < N; ++j)\n {\n#pragma omp ordered depend(sink:i-8,j-1) /* { dg-warning \"refers to iteration never in the iteration space\" } */\n#pragma omp ordered depend(sink:i+3,j-1) /* { dg-warning \"waiting for lexically later iteration\" } */\n bar();\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr68128-1.c", "omp_pragma_line": "#pragma omp parallel for private (i, j, k, l, a, b, c, s, e)", "context_chars": 100, "text": ", float v, float w, float x, float y, float z, float t)\n{\n int i, j, k, l;\n float a, *b, c, s, e;\nfor (j = 0; j < 1024; j++)\n {\n k = j * 64;\n l = j * 64 + 63;\n a = v + j * w;\n b = u + j * 64;\n for (i = k; i <= l; i++, b++, a += w)\n\t{\n\t c = a * a + y;\n\t s = (1.f - c * x) * (1.f - c * x);\n\t e = t * (1 / __builtin_sqrtf (c)) * s;\n\t *b += (c < z ? e : 0);\n\t}\n } #pragma omp parallel for private (i, j, k, l, a, b, c, s, e)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27388-2.c", "omp_pragma_line": "#pragma omp parallel for shared (i)", "context_chars": 100, "text": "dg-options \"-fopenmp -fdump-tree-omplower\" } */\n\nextern void baz (int);\n\nvoid\nfoo (void)\n{\n int i;\nfor (i = 0; i < 2; i++)\n baz (i);\n}\n\nvoid\nbar (void)\n{\n int j = 0;\n#pragma omp parallel shared (j)\n {\n j++;\n#pragma omp for\n for (j = 0; j < 2; j++)\n baz (j);\n }\n} #pragma omp parallel for shared (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-13.c", "omp_pragma_line": "#pragma omp parallel for default(none)", "context_chars": 100, "text": "}\n// { dg-options \"-fopenmp -fdump-tree-ompexp\" }\n\nextern void bar(int);\nvoid foo(void)\n{\n int i;\n\nfor (i = 0; i < 10; i++)\n bar(i);\n}\n\n// { dg-final { scan-tree-dump-times \"omp_data_o\" 0 \"ompexp\" } } #pragma omp parallel for default(none)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr49640.c", "omp_pragma_line": "#pragma omp parallel for private(ii,jj,kk)", "context_chars": 100, "text": " Q, int R, int i, int j, int k,\n unsigned char x[P][Q][R], int y[N][M][K])\n{\n int ii, jj, kk;\n\nfor (ii = 0; ii < P; ++ii)\n for (jj = 0; jj < Q; ++jj)\n for (kk = 0; kk < R; ++kk)\n\ty[i + ii][j + jj][k + kk] = x[ii][jj][kk];\n}\n\nvoid\nbar (int N, int M, int K, int P, int Q, int R, int i, int j, int k,\n unsigned char x[P][Q][R], float y[N][M][K], float factor, float zero)\n{\n int ii, jj, kk;\n\n#pragma omp parallel for private(ii,jj,kk)\n for (ii = 0; ii < P; ++ii)\n for (jj = 0; jj < Q; ++jj)\n for (kk = 0; kk < R; ++kk)\n\ty[i + ii][j + jj][k + kk] = factor * x[ii][jj][kk] + zero;\n} #pragma omp parallel for private(ii,jj,kk)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sharing-1.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": ", j, s, l;\n\n p = malloc 
(sizeof (int));\n if (p == NULL)\n return 0;\n *p = 7;\n s = 6;\n l = 0;\ndefault (none) private (p) shared (s) \n for (i = 0; i < 64; i++)\n {\n int k = foo (0);\t/* Predetermined - private (automatic var declared */\n k++;\t\t/* in scope of construct). */\n thrglobalvar++;\t/* Predetermined - threadprivate. */\n thrlocvar++;\t/* Predetermined - threadprivate. */\n foo (i);\t\t/* Predetermined - private (omp for loop variable). */\n foo (constvar);\t/* Predetermined - shared (const qualified type). */\n foo (*p);\t\t/* *p predetermined - shared (heap allocated */\n (*p)++;\t\t/* storage). */\n bar (p);\t\t/* Explicitly determined - private. */\n foo (s);\t\t/* Explicitly determined - shared. */\n globalvar++;\t/* { dg-error \"not specified in\" } */\n locvar++;\t\t/* { dg-error \"not specified in\" } */\n l++;\t\t/* { dg-error \"not specified in\" } */\n for (j = 0; j < 2; j++); /* { dg-error \"not specified in\" } */\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sink-fold-1.c", "omp_pragma_line": "#pragma omp parallel for ordered(3)", "context_chars": 100, "text": "\" } */\n\n/* Test depend(sink) clause folding. */\n\nint i,j,k, N;\n\nextern void bar();\n\nvoid\nfunk ()\n{\nfor (i=0; i < N; i++)\n for (j=0; j < N; ++j)\n for (k=0; k < N; ++k)\n {\n/* We remove the (sink:i,j-1,k) by virtue of it the i+0. The remaining\n clauses get folded with a GCD of -2 for `i' and a maximum of -2, +2 for\n 'j' and 'k'. */\n#pragma omp ordered \\\n depend(sink:i-8,j-2,k+2) \\\n depend(sink:i, j-1,k) \\\n depend(sink:i-4,j-3,k+6) \\\n depend(sink:i-6,j-4,k-6)\n bar();\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27415.c", "omp_pragma_line": "#pragma omp parallel for firstprivate (i)\t ", "context_chars": 100, "text": "ould not be firstprivate\" } */\n for (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest2 (void)\n{\n int i = 0;\nfor (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest3 (void)\n{\n int i = 0;\n#pragma omp parallel\n#pragma omp for reduction (+:i)\t\t\t/* { dg-error \"should not be reduction\" } */\n for (i = 0; i < 10; i++)\n ;\n} #pragma omp parallel for firstprivate (i)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27415.c", "omp_pragma_line": "#pragma omp parallel for reduction (*:i)\t ", "context_chars": 100, "text": "\"should not be reduction\" } */\n for (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest4 (void)\n{\n int i = 0;\nfor (i = 0; i < 10; i++)\n ;\n}\n\nvoid\ntest5 (void)\n{\n int i = 0;\n#pragma omp parallel firstprivate (i)\n#pragma omp for\n for (i = 0; i < 10; i++)\n ;\n} #pragma omp parallel for reduction (*:i)\t "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/openmp-simd-2.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(4) safelen(64)", "context_chars": 100, "text": "!= s.s || u != s.s)\n abort ();\n return s.s;\n}\n\n\nvoid bar(int n, float *a, float *b)\n{\n int i; \nfor (i = 0; i < n ; i++)\n a[i] = b[i];\n}\n\n/* { dg-final { scan-tree-dump-times \"pragma omp simd reduction\\\\(u\\\\) reduction\\\\(t\\\\) reduction\\\\(\\\\+:s\\\\) aligned\\\\(a:32\\\\)\" 1 \"original\" } } #pragma omp parallel for simd num_threads(4) safelen(64)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-1.c", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic, b)", "context_chars": 100, "text": " (i = 0; i < 16; i++)\n ;\n #pragma omp taskloop grainsize (c)\n for (i = 0; i < 16; i++)\n ;\n for (i = 0; i < 16; i++)\n ;\n j = 0;\n #pragma omp simd linear(j:b)\n for (i = 0; i < 16; i++)\n j += b;\n j = 4;\n #pragma omp atomic read\n b = j;\n #pragma omp atomic write\n j = c;\n #pragma omp atomic\n j += c;\n #pragma omp atomic capture\n b = j += c;\n #pragma omp atomic capture\n b = ++j;\n #pragma omp atomic capture\n { b = j; j = c; } #pragma omp parallel for schedule (dynamic, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr30421.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) lastprivate(a)", "context_chars": 100, "text": "*/\n/* { dg-do compile } */\n/* { dg-options \"-O2 -fopenmp -Wall\" } */\n\nint\nfoo ()\n{\n int a = 0, i;\n\nfor (i = 0; i < 10; i++)\n a += i;\n\n return a;\n}\n\nint\nbar ()\n{\n int a = 0, i;\n\n#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)\n for (i = 0; i < 10; i++)\n a += i;\n\n return a;\n} #pragma omp parallel for firstprivate(a) lastprivate(a)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr30421.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)", "context_chars": 100, "text": "lastprivate(a)\n for (i = 0; i < 10; i++)\n a += i;\n\n return a;\n}\n\nint\nbar ()\n{\n int a = 0, i;\n\nfor (i = 0; i < 10; i++)\n a += i;\n\n return a;\n}\n\nint\nbaz ()\n{\n int a = 0, i;\n\n#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(dynamic)\n for (i = 0; i < 10; i++)\n a += i;\n\n return a;\n} #pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/static-chunk-size-one.c", "omp_pragma_line": "#pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)", "context_chars": 100, "text": "{ dg-options \"-fopenmp -O2 -fdump-tree-optimized -fno-tree-pre\" } */\n\nint\nbar ()\n{\n int a = 0, i;\n\nfor (i = 0; i < 10; i++)\n a += i;\n\n return a;\n}\n\n/* Two phis for reduction, one in loop header, one in loop exit. One phi for iv\n in loop header. 
*/\n/* { dg-final { scan-tree-dump-times \"PHI\" 3 \"optimized\" } } #pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr29965-6.c", "omp_pragma_line": "#pragma omp parallel for schedule (static, 16)", "context_chars": 100, "text": "p for schedule (static, 16)\n for (i = 0; i < 2834; i++)\n baz ();\n}\n\nvoid\nfoo2 (void)\n{\n int i;\nfor (i = 0; i < 2834; i++)\n for (;;)\n ;\n}\n\nvoid\nbar2 (void)\n{\n int i;\n#pragma omp parallel for schedule (static, 16)\n for (i = 0; i < 2834; i++)\n baz ();\n} #pragma omp parallel for schedule (static, 16)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr68640.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "enmp -fdump-tree-ealias-all\" } */\n\n#define N 1024\n\nint\nfoo (int *__restrict__ ap)\n{\n int *bp = ap;\nfor (unsigned int idx = 0; idx < N; idx++)\n ap[idx] = bp[idx];\n}\n\n/* { dg-final { scan-tree-dump-times \"clique 1 base 1\" 2 \"ealias\" } } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr46032-3.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e N 2\n\nint\nfoo (void)\n{\n int a[N], c[N];\n int *ap = &a[0];\n int *bp = &a[0];\n int *cp = &c[0];\n\nfor (unsigned int idx = 0; idx < N; idx++)\n {\n ap[idx] = 1;\n bp[idx] = 2;\n cp[idx] = ap[idx];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr53992.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " long data[10000];\n long i, min=10000;\n for (i=0; i<10000; i++) data[i] = -i;\n \nfor (i=0; i<10000; i++) {\n __transaction_atomic\n {\n if (data[i] < min)\n min = data[i];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/block-8.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// { dg-do compile }\n// PR 24451\n\nint foo()\n{\n int i;\n\n for (i = 0; i < 10; ++i)\n return 0; // { dg-error \"invalid branch to/from OpenMP structured block\" } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.31.1.c", "omp_pragma_line": "#pragma omp parallel for private(i) shared(x, y, n) \\", "context_chars": 100, "text": "-do compile } */\n\nvoid\na31_1 (float *x, int *y, int n)\n{\n int i, b;\n float a;\n a = 0.0;\n b = 0;\nreduction(+:a) reduction(^:b)\n for (i = 0; i < n; i++)\n {\n a += x[i];\n b ^= y[i];\n } #pragma omp parallel for private(i) shared(x, y, n) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.27.1.c", "omp_pragma_line": "#pragma omp parallel for private(a)", "context_chars": 100, "text": "/* { dg-do compile } */\n\nvoid\na27 ()\n{\n int i, a;\n#pragma omp parallel private(a)\n {\nfor (i = 0; i < 10; i++)\n {\n\t/* do work here */\n } #pragma omp parallel for private(a)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.26.2.c", 
"omp_pragma_line": "#pragma omp parallel for private(a)", "context_chars": 100, "text": "int k)\n{\n a = k;\t\t\t/* The global \"a\", not the private \"a\" in f */\n}\n\nvoid\nf (int n)\n{\n int a = 0;\nfor (int i = 1; i < n; i++)\n {\n a = i;\n g (a * 2);\t\t/* Private copy of \"a\" */\n } #pragma omp parallel for private(a)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/vect/pr46032.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "sults[nEvents];\n unsigned pData[nEvents];\n unsigned coeff = 2;\n\n init (&results[0], &pData[0]);\n\nfor (int idx = 0; idx < (int)nEvents; idx++)\n results[idx] = coeff * pData[idx];\n\n check (&results[0]);\n\n return 0;\n}\n\n/* { dg-final { scan-tree-dump-times \"note: vectorized 1 loop\" 1 \"vect\" { xfail { vect_no_align && { ! vect_hw_misalign } } } } } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) schedule(static)", "context_chars": 100, "text": "ne_to_coarse = hypre_CTAlloc(int, n_fine);\n\n n_coarse = 0;\n n_SF = 0;\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_fine; i++)\n if (CF_marker[i] == 1) n_coarse++;\n else if (CF_marker[i] == -3) n_SF++;\n\n pass_array_size = n_fine-n_coarse-n_SF;\n if (pass_array_size) pass_array = hypre_CTAlloc(int, pass_array_size);\n pass_pointer = hypre_CTAlloc(int, max_num_passes+1);\n if (n_fine) assigned = hypre_CTAlloc(int, n_fine);\n\n {\n P_diag_i = hypre_CTAlloc(int, n_fine+1);\n P_offd_i = hypre_CTAlloc(int, n_fine+1);\n } #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) schedule(static)", "context_chars": 100, "text": "stroy(comm_handle);\n }\n }\n\n n_coarse_offd = 0;\n n_SF_offd = 0;\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_offd; i++)\n if (CF_marker_offd[i] == 1) n_coarse_offd++;\n else if (CF_marker_offd[i] == -3) n_SF_offd++;\n\n if (num_cols_offd)\n {\n assigned_offd = hypre_CTAlloc(int, num_cols_offd);\n map_S_to_new = hypre_CTAlloc(int, num_cols_offd);\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd);\n new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd);\n } #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) schedule(static)", "context_chars": 100, "text": "ap_start[pass][0] = 0;\n\n for (i=0; i < num_sends; i++)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (j=send_map_start[i]; j < send_map_start[i+1]; j++)\n {\n j1 = send_map_elmt[j];\n\t if (assigned[j1] == pass-1)\n\t {\n\t P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];\n\t Pext_send_size += P_ncols[j];\n }\n } #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) schedule(static)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,i1) schedule(static)", "context_chars": 100, "text": " P_offd_i[i+1] += P_offd_i[i];\n }\n\n/* determine P for coarse points */\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_coarse; i++)\n {\n i1 = C_array[i];\n P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];\n P_diag_data[P_diag_i[i1]] = 1.0;\n } #pragma omp parallel for private(i,i1) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static)", "context_chars": 100, "text": " beta = gamma / gamma_old;\n \n /* p = s + beta p */\n#ifdef HYPRE_USING_OPENMP\nfor (j=0; j < local_size; j++)\n {\n p_data[j] = s_data[j] + beta*p_data[j];\n } #pragma omp parallel for private(j) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,scale) schedule(static)", "context_chars": 100, "text": " /*d_0* = 1/theta * inv(M)r_0 - M is Jacobi*/\n /* x_1 = x_0 + d_0 */\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n \n scale = temp1/diag;\n dk[i] = scale*v_data[i];\n u_data[i] += dk[i];\n \n } #pragma omp parallel for private(i,diag,scale) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,scale) schedule(static)", "context_chars": 100, "text": "2.0*sigma - p_k);\n temp1 = p_kp1*p_k;\n temp2 = 2.0*p_kp1/delta;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n \n scale = temp2/diag;\n dk[i] = temp1*dk[i] + scale*v_data[i];\n u_data[i] += dk[i];\n } #pragma omp parallel for private(i,diag,scale) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static) ", "context_chars": 100, "text": " hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);\n mult = coefs[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + v_data[j];\n } #pragma omp parallel for private(j) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static) ", "context_chars": 100, "text": " u_data[j] = mult * r_data[j] + v_data[j];\n }\n \n }\n\n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < num_rows; i++ ) \n {\n u_data[i] = orig_u[i] + u_data[i];\n } #pragma omp parallel for private(i) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j,diag) schedule(static) ", "context_chars": 100, "text": "s_data and get scaled residual: r = D^(-1/2)f -\n * D^(-1/2)A*u */\n\n\n#ifdef HYPRE_USING_OPENMP\nfor 
(j = 0; j < num_rows; j++)\n {\n diag = A_diag_data[A_diag_i[j]];\n ds_data[j] = 1/sqrt(diag);\n\n r_data[j] = ds_data[j] * f_data[j];\n } #pragma omp parallel for private(j,diag) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static) ", "context_chars": 100, "text": "ta[j];\n }\n\n hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n r_data[j] += ds_data[j] * tmp_data[j];\n } #pragma omp parallel for private(j) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static) ", "context_chars": 100, "text": "then start \n the iteration by multiplying r by the cheby coef.*/\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n orig_u[j] = u_data[j]; /* orig, unscaled u */\n\n u_data[j] = r_data[j] * coefs[cheby_order]; \n } #pragma omp parallel for private(j) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static) ", "context_chars": 100, "text": "y_order - 1; i >= 0; i-- ) \n {\n /* v = D^(-1/2)AD^(-1/2)u */\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n tmp_data[j] = ds_data[j] * u_data[j];\n } #pragma omp parallel for private(j) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j,tmp_d) schedule(static) ", "context_chars": 100, "text": "ec, 0.0, v);\n\n /* u_new = coef*r + v*/\n mult = coefs[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n tmp_d = ds_data[j]* v_data[j];\n u_data[j] = mult * r_data[j] + tmp_d;\n } #pragma omp parallel for private(j,tmp_d) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule(static) ", "context_chars": 100, "text": " */\n\n\n /* now we have to scale u_data before adding it to u_orig*/\n\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n u_data[j] = orig_u[j] + ds_data[j]*u_data[j];\n } #pragma omp parallel for private(j) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,scale) schedule(static)", "context_chars": 100, "text": "0/upper_bound;\n hypre_ParVectorAxpy(scale, Ztemp, v);\n /* END NEW */\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n diag = 1;\n scale = temp1/diag;\n dk[i] = scale*v_data[i];\n u_data[i] += dk[i];\n \n } #pragma omp parallel for private(i,diag,scale) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for 
private(i,diag,scale) schedule(static)", "context_chars": 100, "text": "_bound;\n hypre_ParVectorAxpy(scale, Ztemp, v);\n /* END NEW */\n\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n diag = 1;\n scale = temp2/diag;\n dk[i] = temp1*dk[i] + scale*v_data[i];\n u_data[i] += dk[i];\n } #pragma omp parallel for private(i,diag,scale) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": " \n int ierr = 0;\n\n size *=hypre_VectorNumVectors(v);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n vector_data[i] = value;\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorSetRandomValues\n *\n * returns vector of values randomly distributed between -1.0 and +1.0\n *--------------------------------------------------------------------------*/\n\nint\nhypre_SeqVectorSetRandomValues( hypre_Vector *v,\n int seed )\n{\n double *vector_data = hypre_VectorData(v);\n int size = hypre_VectorSize(v);\n \n int i;\n \n int ierr = 0;\n hypre_SeedRand(seed);\n\n size *=hypre_VectorNumVectors(v);\n\n/* RDF: threading this loop may cause problems because of hypre_Rand() */\n for (i = 0; i < size; i++)\n vector_data[i] = 2.0 * hypre_Rand() - 1.0;\n\n return ierr;\n} #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": ";\n \n int ierr = 0;\n\n size *=hypre_VectorNumVectors(x);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] = x_data[i];\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorCloneDeep\n * Returns a complete copy of x - a deep copy, with its own copy of the data.\n *--------------------------------------------------------------------------*/\n\nhypre_Vector *\nhypre_SeqVectorCloneDeep( hypre_Vector *x )\n{\n int size = hypre_VectorSize(x);\n int num_vectors = hypre_VectorNumVectors(x);\n hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );\n\n hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);\n hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);\n hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);\n\n hypre_SeqVectorInitialize(y);\n hypre_SeqVectorCopy( x, y );\n\n return y;\n} #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "\n \n int ierr = 0;\n\n size *=hypre_VectorNumVectors(y);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] *= alpha;\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorAxpy\n *--------------------------------------------------------------------------*/\n\nint\nhypre_SeqVectorAxpy( double alpha,\n hypre_Vector *x,\n hypre_Vector *y )\n{\n double *x_data = hypre_VectorData(x);\n double *y_data = hypre_VectorData(y);\n int size = hypre_VectorSize(x);\n \n int i;\n 
\n int ierr = 0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < size; i++)\n y_data[i] += alpha * x_data[i];\n\n return ierr;\n} #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "\n \n int ierr = 0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] += alpha * x_data[i];\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorInnerProd\n *--------------------------------------------------------------------------*/\n\ndouble hypre_SeqVectorInnerProd( hypre_Vector *x,\n hypre_Vector *y )\n{\n double *x_data = hypre_VectorData(x);\n double *y_data = hypre_VectorData(y);\n int size = hypre_VectorSize(x);\n \n int i;\n\n double result = 0.0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) reduction(+:result) schedule(static)\n\n for (i = 0; i < size; i++)\n result += y_data[i] * x_data[i];\n\n return result;\n} #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:result) schedule(static)", "context_chars": 100, "text": " i;\n\n double result = 0.0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n result += y_data[i] * x_data[i];\n\n return result;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_VectorSumElts:\n * Returns the sum of all vector elements.\n *--------------------------------------------------------------------------*/\n\ndouble hypre_VectorSumElts( hypre_Vector *vector )\n{\n double sum = 0;\n double * data = hypre_VectorData( vector );\n int size = hypre_VectorSize( vector );\n int i;\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) reduction(+:sum) schedule(static)\n\n for ( i=0; i #pragma omp parallel for private(i) reduction(+:result) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static) ", "context_chars": 100, "text": "------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] *= beta;\n\n return ierr;\n }\n\n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n \n temp = beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static) \n\n\t for (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static) \n\n\t for (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] *= temp;\n }\n } #pragma omp parallel for private(i) schedule(static) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static) ", "context_chars": 100, "text": "beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static) \n\n\t for (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] *= temp;\n } #pragma omp parallel for private(i) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static) ", "context_chars": 100, "text": "m_rows*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A*x\n *-----------------------------------------------------------------*/\n\n if (num_rownnz < xpar*(num_rows))\n {\n\n/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)\n\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n /*\n * for (jj = A_i[m]; jj < A_i[m+1]; jj++)\n * {\n * j = A_j[jj]; \n * y_data[m] += A_data[jj] * x_data[j];\n * } */\n if ( num_vectors==1 )\n {\n tempx = y_data[m];\n for (jj = A_i[m]; jj < A_i[m+1]; jj++) \n tempx += A_data[jj] * x_data[A_j[jj]];\n y_data[m] = tempx;\n }\n else\n for ( j=0; j #pragma omp parallel for private(i) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)", "context_chars": 100, "text": "to do the A*x multiplication when num_rownnz is smaller than num_rows */\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n /*\n * for (jj = A_i[m]; jj < A_i[m+1]; jj++)\n * {\n * j = A_j[jj]; \n * y_data[m] += A_data[jj] * x_data[j];\n * } */\n if ( num_vectors==1 )\n {\n tempx = y_data[m];\n for (jj = A_i[m]; jj < A_i[m+1]; jj++) \n tempx += A_data[jj] * x_data[A_j[jj]];\n y_data[m] = tempx;\n }\n else\n for ( j=0; j #pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj,temp,j) schedule(static)", "context_chars": 100, "text": "ride_y + m*idxstride_y] = tempx;\n }\n }\n\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n if ( num_vectors==1 )\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n else\n for ( j=0; j #pragma omp parallel for private(i,jj,temp,j) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": 
"--------------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n\t y_data[i] *= alpha;\n }\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_CSRMatrixMatvecT\n *\n * This version is using a different (more efficient) threading scheme\n\n * Performs y <- alpha * A^T * x + beta * y\n *\n * From Van Henson's modification of hypre_CSRMatrixMatvec.\n *--------------------------------------------------------------------------*/\n\nint\nhypre_CSRMatrixMatvecT( double alpha,\n hypre_CSRMatrix *A,\n hypre_Vector *x,\n double beta,\n hypre_Vector *y )\n{\n double *A_data = hypre_CSRMatrixData(A);\n int *A_i = hypre_CSRMatrixI(A);\n int *A_j = hypre_CSRMatrixJ(A);\n int num_rows = hypre_CSRMatrixNumRows(A);\n int num_cols = hypre_CSRMatrixNumCols(A);\n\n double *x_data = hypre_VectorData(x);\n double *y_data = hypre_VectorData(y);\n int x_size = hypre_VectorSize(x);\n int y_size = hypre_VectorSize(y);\n int num_vectors = hypre_VectorNumVectors(x);\n int idxstride_y = hypre_VectorIndexStride(y);\n int vecstride_y = hypre_VectorVectorStride(y);\n int idxstride_x = hypre_VectorIndexStride(x);\n int vecstride_x = hypre_VectorVectorStride(x);\n\n double temp;\n\n double *y_data_expand = NULL;\n int offset = 0;\n#ifdef HYPRE_USING_OPENMP\n int my_thread_num = 0;\n\n \n int i, j, jv, jj;\n int num_threads;\n\n int ierr = 0;\n\n /*---------------------------------------------------------------------\n * Check for size compatibility. MatvecT returns ierr = 1 if\n * length of X doesn't equal the number of rows of A,\n * ierr = 2 if the length of Y doesn't equal the number of \n * columns of A, and ierr = 3 if both are true.\n *\n * Because temporary vectors are often used in MatvecT, none of \n * these conditions terminates processing, and the ierr flag\n * is informational only.\n *--------------------------------------------------------------------*/\n\n hypre_assert( num_vectors == hypre_VectorNumVectors(y) );\n \n if (num_rows != x_size)\n ierr = 1;\n\n if (num_cols != y_size)\n ierr = 2;\n\n if (num_rows != x_size && num_cols != y_size)\n ierr = 3;\n /*-----------------------------------------------------------------------\n * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS\n *-----------------------------------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= beta;\n\n return ierr;\n }\n\n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n\n temp = beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n\t for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n\t for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A^T*x\n *-----------------------------------------------------------------*/\n num_threads = hypre_NumThreads();\n if (num_threads > 1)\n {\n y_data_expand = hypre_CTAlloc(double, num_threads*y_size);\n \n if ( num_vectors==1 )\n {\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp 
parallel private(i,jj,j, my_thread_num, offset) \n { \n my_thread_num = omp_get_thread_num();\n offset = y_size*my_thread_num;\n#pragma omp for schedule(static)\n\n for (i = 0; i < num_rows; i++)\n {\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n j = A_j[jj];\n y_data_expand[offset + j] += A_data[jj] * x_data[i];\n }\n }\n#ifdef HYPRE_USING_OPENMP\n /* implied barrier */ \n#pragma omp for schedule(static)\n\n for (i = 0; i < y_size; i++)\n {\n for (j = 0; j < num_threads; j++)\n {\n y_data[i] += y_data_expand[j*y_size + i];\n /*y_data_expand[j*y_size + i] = 0; //zero out for next time */\n }\n }\n#ifdef HYPRE_USING_OPENMP\n } /* end parallel region */\n \n hypre_TFree(y_data_expand);\n }\n else\n {\n /* MULTIPLE VECTORS NOT THREADED YET */\n for (i = 0; i < num_rows; i++)\n {\n for ( jv=0; jv #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= beta;\n\n return ierr;\n }\n\n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n\n temp = beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n\t for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n\t for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= temp;\n }\n } #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n\t for (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= temp;\n } #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "m_cols*num_vectors; i++)\n\t y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A^T*x\n *-----------------------------------------------------------------*/\n num_threads = hypre_NumThreads();\n if (num_threads > 1)\n {\n y_data_expand = hypre_CTAlloc(double, num_threads*y_size);\n \n if ( num_vectors==1 )\n {\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel private(i,jj,j, my_thread_num, offset) \n { \n my_thread_num = omp_get_thread_num();\n offset = y_size*my_thread_num;\n#pragma omp for schedule(static)\n\n for (i = 0; i < num_rows; i++)\n {\n for (jj = 
A_i[i]; jj < A_i[i+1]; jj++)\n {\n j = A_j[jj];\n y_data_expand[offset + j] += A_data[jj] * x_data[i];\n }\n }\n#ifdef HYPRE_USING_OPENMP\n /* implied barrier */ \n#pragma omp for schedule(static)\n\n for (i = 0; i < y_size; i++)\n {\n for (j = 0; j < num_threads; j++)\n {\n y_data[i] += y_data_expand[j*y_size + i];\n /*y_data_expand[j*y_size + i] = 0; //zero out for next time */\n }\n }\n#ifdef HYPRE_USING_OPENMP\n } /* end parallel region */\n \n hypre_TFree(y_data_expand);\n }\n else\n {\n /* MULTIPLE VECTORS NOT THREADED YET */\n for (i = 0; i < num_rows; i++)\n {\n for ( jv=0; jv #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n\t y_data[i] *= alpha;\n }\n\n return ierr;\n}\n\n\n/*--------------------------------------------------------------------------\n * hypre_CSRMatrixMatvec_FF\n *--------------------------------------------------------------------------*/\n \nint\nhypre_CSRMatrixMatvec_FF( double alpha,\n hypre_CSRMatrix *A,\n hypre_Vector *x,\n double beta,\n hypre_Vector *y,\n int *CF_marker_x,\n int *CF_marker_y,\n int fpt )\n{\n double *A_data = hypre_CSRMatrixData(A);\n int *A_i = hypre_CSRMatrixI(A);\n int *A_j = hypre_CSRMatrixJ(A);\n int num_rows = hypre_CSRMatrixNumRows(A);\n int num_cols = hypre_CSRMatrixNumCols(A);\n \n double *x_data = hypre_VectorData(x);\n double *y_data = hypre_VectorData(y);\n int x_size = hypre_VectorSize(x);\n int y_size = hypre_VectorSize(y);\n \n double temp;\n \n int i, jj;\n \n int ierr = 0;\n \n \n /*---------------------------------------------------------------------\n * Check for size compatibility. 
Matvec returns ierr = 1 if\n * length of X doesn't equal the number of columns of A,\n * ierr = 2 if the length of Y doesn't equal the number of rows\n * of A, and ierr = 3 if both are true.\n *\n * Because temporary vectors are often used in Matvec, none of\n * these conditions terminates processing, and the ierr flag\n * is informational only.\n *--------------------------------------------------------------------*/\n \n if (num_cols != x_size)\n ierr = 1;\n \n if (num_rows != y_size)\n ierr = 2;\n \n if (num_cols != x_size && num_rows != y_size)\n ierr = 3;\n \n /*-----------------------------------------------------------------------\n * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS\n *-----------------------------------------------------------------------*/\n \n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= beta;\n \n return ierr;\n }\n \n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n \n temp = beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n }\n \n /*-----------------------------------------------------------------\n * y += A*x\n *-----------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,jj,temp) schedule(static)\n \n for (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n }\n \n \n /*-----------------------------------------------------------------\n * y = alpha*y\n *-----------------------------------------------------------------*/\n \n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= alpha;\n }\n \n return ierr;\n} #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": " \n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= beta;\n \n return ierr;\n }\n \n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n \n temp = beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n } #pragma omp parallel for 
private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": " \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) schedule(static)\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n } #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule(static)", "context_chars": 100, "text": " if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n }\n \n /*-----------------------------------------------------------------\n * y += A*x\n *-----------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,jj,temp) schedule(static)\n \n for (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n } #pragma omp parallel for private(i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj,temp) schedule(static)", "context_chars": 100, "text": " \n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n } #pragma omp parallel for private(i,jj,temp) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": " Index_t numElem)\n{\n //\n // pull in the stresses appropriate to the hydro integration\n //\n\nfor (Index_t i = 0 ; i < numElem ; ++i){\n sigxx[i] = sigyy[i] = sigzz[i] = - domain.p(i) - domain.q(i) ;\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "te(numElem8) ;\n fz_elem = Allocate(numElem8) ;\n }\n // loop over all elements\n\nfor( Index_t k=0 ; k 1) {\n // Eliminate thread writing conflicts at the nodes by giving\n // each element its own copy to write to\n SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],\n &fx_elem[k*8],\n &fy_elem[k*8],\n &fz_elem[k*8] ) ;\n }\n else {\n SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],\n fx_local, fy_local, fz_local ) ;\n\n // copy nodal force contributions to global force arrray.\n for( Index_t lnode=0 ; lnode<8 ; ++lnode ) {\n Index_t gnode = 
elemToNode[lnode];\n domain.fx(gnode) += fx_local[lnode];\n domain.fy(gnode) += fy_local[lnode];\n domain.fz(gnode) += fz_local[lnode];\n }\n }\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "we need to copy the data out of the temporary\n // arrays used above into the final forces field\nfor( Index_t gnode=0 ; gnode #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem, hourg)", "context_chars": 100, "text": "t(-1.);\n\n/*************************************************/\n/* compute the hourglass modes */\n\n\nfor(Index_t i2=0;i2 1) {\n fx_local = &fx_elem[i3] ;\n fx_local[0] = hgfx[0];\n fx_local[1] = hgfx[1];\n fx_local[2] = hgfx[2];\n fx_local[3] = hgfx[3];\n fx_local[4] = hgfx[4];\n fx_local[5] = hgfx[5];\n fx_local[6] = hgfx[6];\n fx_local[7] = hgfx[7];\n\n fy_local = &fy_elem[i3] ;\n fy_local[0] = hgfy[0];\n fy_local[1] = hgfy[1];\n fy_local[2] = hgfy[2];\n fy_local[3] = hgfy[3];\n fy_local[4] = hgfy[4];\n fy_local[5] = hgfy[5];\n fy_local[6] = hgfy[6];\n fy_local[7] = hgfy[7];\n\n fz_local = &fz_elem[i3] ;\n fz_local[0] = hgfz[0];\n fz_local[1] = hgfz[1];\n fz_local[2] = hgfz[2];\n fz_local[3] = hgfz[3];\n fz_local[4] = hgfz[4];\n fz_local[5] = hgfz[5];\n fz_local[6] = hgfz[6];\n fz_local[7] = hgfz[7];\n }\n else {\n domain.fx(n0si2) += hgfx[0];\n domain.fy(n0si2) += hgfy[0];\n domain.fz(n0si2) += hgfz[0];\n\n domain.fx(n1si2) += hgfx[1];\n domain.fy(n1si2) += hgfy[1];\n domain.fz(n1si2) += hgfz[1];\n\n domain.fx(n2si2) += hgfx[2];\n domain.fy(n2si2) += hgfy[2];\n domain.fz(n2si2) += hgfz[2];\n\n domain.fx(n3si2) += hgfx[3];\n domain.fy(n3si2) += hgfy[3];\n domain.fz(n3si2) += hgfz[3];\n\n domain.fx(n4si2) += hgfx[4];\n domain.fy(n4si2) += hgfy[4];\n domain.fz(n4si2) += hgfz[4];\n\n domain.fx(n5si2) += hgfx[5];\n domain.fy(n5si2) += hgfy[5];\n domain.fz(n5si2) += hgfz[5];\n\n domain.fx(n6si2) += hgfx[6];\n domain.fy(n6si2) += hgfy[6];\n domain.fz(n6si2) += hgfz[6];\n\n domain.fx(n7si2) += hgfx[7];\n domain.fy(n7si2) += hgfy[7];\n domain.fz(n7si2) += hgfz[7];\n }\n } #pragma omp parallel for firstprivate(numElem, hourg)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": " if (numthreads > 1) {\n // Collect the data from the local arrays into the final force arrays\nfor( Index_t gnode=0 ; gnode #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "al_t>(numElem8) ;\n Real_t *z8n = Allocate(numElem8) ;\n\n /* start loop over elements */\nfor (Index_t i=0 ; i #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", 
"context_chars": 100, "text": "lem,\n domain.numNode()) ;\n\n // check for negative element volume\nfor ( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": " domain.sizeX() + 1, domain.sizeY() + 1, domain.sizeZ() + 1,\n true, false) ;\n#endif \n\nfor (Index_t i=0; i #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "**************/\n\nstatic inline\nvoid CalcAccelerationForNodes(Domain &domain, Index_t numNode)\n{\n \nfor (Index_t i = 0; i < numNode; ++i) {\n domain.xdd(i) = domain.fx(i) / domain.nodalMass(i);\n domain.ydd(i) = domain.fy(i) / domain.nodalMass(i);\n domain.zdd(i) = domain.fz(i) / domain.nodalMass(i);\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "(Domain &domain, const Real_t dt, const Real_t u_cut,\n Index_t numNode)\n{\n\nfor ( Index_t i = 0 ; i < numNode ; ++i )\n {\n Real_t xdtmp, ydtmp, zdtmp ;\n\n xdtmp = domain.xd(i) + domain.xdd(i) * dt ;\n if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);\n domain.xd(i) = xdtmp ;\n\n ydtmp = domain.yd(i) + domain.ydd(i) * dt ;\n if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);\n domain.yd(i) = ydtmp ;\n\n zdtmp = domain.zd(i) + domain.zdd(i) * dt ;\n if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);\n domain.zd(i) = zdtmp ;\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numNode)", "context_chars": 100, "text": "*****/\n\nstatic inline\nvoid CalcPositionForNodes(Domain &domain, const Real_t dt, Index_t numNode)\n{\nfor ( Index_t i = 0 ; i < numNode ; ++i )\n {\n domain.x(i) += domain.xd(i) * dt ;\n domain.y(i) += domain.yd(i) * dt ;\n domain.z(i) += domain.zd(i) * dt ;\n } #pragma omp parallel for firstprivate(numNode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem, deltaTime)", "context_chars": 100, "text": "w, \n Real_t deltaTime, Index_t numElem )\n{\n\n // loop over all elements\nfor( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem, deltaTime)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElem)", "context_chars": 100, "text": "deltatime, numElem) ;\n\n // element loop to do some stuff not included in the elemlib function.\nfor ( Index_t k=0 ; k #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for 
firstprivate(numElem)", "context_chars": 100, "text": "onotonicQGradientsForElems(Domain& domain, Real_t vnew[])\n{\n Index_t numElem = domain.numElem();\n\nfor (Index_t i = 0 ; i < numElem ; ++i ) {\n const Real_t ptiny = Real_t(1.e-36) ;\n Real_t ax,ay,az ;\n Real_t dxv,dyv,dzv ;\n\n const Index_t *elemToNode = domain.nodelist(i);\n Index_t n0 = elemToNode[0] ;\n Index_t n1 = elemToNode[1] ;\n Index_t n2 = elemToNode[2] ;\n Index_t n3 = elemToNode[3] ;\n Index_t n4 = elemToNode[4] ;\n Index_t n5 = elemToNode[5] ;\n Index_t n6 = elemToNode[6] ;\n Index_t n7 = elemToNode[7] ;\n\n Real_t x0 = domain.x(n0) ;\n Real_t x1 = domain.x(n1) ;\n Real_t x2 = domain.x(n2) ;\n Real_t x3 = domain.x(n3) ;\n Real_t x4 = domain.x(n4) ;\n Real_t x5 = domain.x(n5) ;\n Real_t x6 = domain.x(n6) ;\n Real_t x7 = domain.x(n7) ;\n\n Real_t y0 = domain.y(n0) ;\n Real_t y1 = domain.y(n1) ;\n Real_t y2 = domain.y(n2) ;\n Real_t y3 = domain.y(n3) ;\n Real_t y4 = domain.y(n4) ;\n Real_t y5 = domain.y(n5) ;\n Real_t y6 = domain.y(n6) ;\n Real_t y7 = domain.y(n7) ;\n\n Real_t z0 = domain.z(n0) ;\n Real_t z1 = domain.z(n1) ;\n Real_t z2 = domain.z(n2) ;\n Real_t z3 = domain.z(n3) ;\n Real_t z4 = domain.z(n4) ;\n Real_t z5 = domain.z(n5) ;\n Real_t z6 = domain.z(n6) ;\n Real_t z7 = domain.z(n7) ;\n\n Real_t xv0 = domain.xd(n0) ;\n Real_t xv1 = domain.xd(n1) ;\n Real_t xv2 = domain.xd(n2) ;\n Real_t xv3 = domain.xd(n3) ;\n Real_t xv4 = domain.xd(n4) ;\n Real_t xv5 = domain.xd(n5) ;\n Real_t xv6 = domain.xd(n6) ;\n Real_t xv7 = domain.xd(n7) ;\n\n Real_t yv0 = domain.yd(n0) ;\n Real_t yv1 = domain.yd(n1) ;\n Real_t yv2 = domain.yd(n2) ;\n Real_t yv3 = domain.yd(n3) ;\n Real_t yv4 = domain.yd(n4) ;\n Real_t yv5 = domain.yd(n5) ;\n Real_t yv6 = domain.yd(n6) ;\n Real_t yv7 = domain.yd(n7) ;\n\n Real_t zv0 = domain.zd(n0) ;\n Real_t zv1 = domain.zd(n1) ;\n Real_t zv2 = domain.zd(n2) ;\n Real_t zv3 = domain.zd(n3) ;\n Real_t zv4 = domain.zd(n4) ;\n Real_t zv5 = domain.zd(n5) ;\n Real_t zv6 = domain.zd(n6) ;\n Real_t zv7 = domain.zd(n7) ;\n\n Real_t vol = domain.volo(i)*vnew[i] ;\n Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;\n\n Real_t dxj = Real_t(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;\n Real_t dyj = Real_t(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;\n Real_t dzj = Real_t(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;\n\n Real_t dxi = Real_t( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;\n Real_t dyi = Real_t( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;\n Real_t dzi = Real_t( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;\n\n Real_t dxk = Real_t( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;\n Real_t dyk = Real_t( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;\n Real_t dzk = Real_t( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;\n\n /* find delvk and delxk ( i cross j ) */\n\n ax = dyi*dzj - dzi*dyj ;\n ay = dzi*dxj - dxi*dzj ;\n az = dxi*dyj - dyi*dxj ;\n\n domain.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;\n dyv = Real_t(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;\n dzv = Real_t(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;\n\n domain.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;\n\n /* find delxi and delvi ( j cross k ) */\n\n ax = dyj*dzk - dzj*dyk ;\n ay = dzj*dxk - dxj*dzk ;\n az = dxj*dyk - dyj*dxk ;\n\n domain.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;\n dyv = Real_t(0.25)*((yv1+yv2+yv6+yv5) - 
(yv0+yv3+yv7+yv4)) ;\n dzv = Real_t(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;\n\n domain.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;\n\n /* find delxj and delvj ( k cross i ) */\n\n ax = dyk*dzi - dzk*dyi ;\n ay = dzk*dxi - dxk*dzi ;\n az = dxk*dyi - dyk*dxi ;\n\n domain.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;\n\n ax *= norm ;\n ay *= norm ;\n az *= norm ;\n\n dxv = Real_t(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;\n dyv = Real_t(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;\n dzv = Real_t(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;\n\n domain.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;\n } #pragma omp parallel for firstprivate(numElem)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)", "context_chars": 100, "text": "q_max_slope();\n Real_t qlc_monoq = domain.qlc_monoq();\n Real_t qqc_monoq = domain.qqc_monoq();\n\nfor ( Index_t ielem = 0 ; ielem < domain.regElemSize(r); ++ielem ) {\n Index_t i = domain.regElemlist(r,ielem);\n Real_t qlin, qquad ;\n Real_t phixi, phieta, phizeta ;\n Int_t bcMask = domain.elemBC(i) ;\n Real_t delvm = 0.0, delvp =0.0;\n\n /* phixi */\n Real_t norm = Real_t(1.) / (domain.delv_xi(i)+ ptiny ) ;\n\n switch (bcMask & XI_M) {\n case XI_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_xi(domain.lxim(i)); break ;\n case XI_M_SYMM: delvm = domain.delv_xi(i) ; break ;\n case XI_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & XI_P) {\n case XI_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_xi(domain.lxip(i)) ; break ;\n case XI_P_SYMM: delvp = domain.delv_xi(i) ; break ;\n case XI_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phixi = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phixi ) phixi = delvm ;\n if ( delvp < phixi ) phixi = delvp ;\n if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;\n if ( phixi > monoq_max_slope) phixi = monoq_max_slope;\n\n\n /* phieta */\n norm = Real_t(1.) 
/ ( domain.delv_eta(i) + ptiny ) ;\n\n switch (bcMask & ETA_M) {\n case ETA_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_eta(domain.letam(i)) ; break ;\n case ETA_M_SYMM: delvm = domain.delv_eta(i) ; break ;\n case ETA_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & ETA_P) {\n case ETA_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_eta(domain.letap(i)) ; break ;\n case ETA_P_SYMM: delvp = domain.delv_eta(i) ; break ;\n case ETA_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phieta = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phieta ) phieta = delvm ;\n if ( delvp < phieta ) phieta = delvp ;\n if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;\n if ( phieta > monoq_max_slope) phieta = monoq_max_slope;\n\n /* phizeta */\n norm = Real_t(1.) / ( domain.delv_zeta(i) + ptiny ) ;\n\n switch (bcMask & ZETA_M) {\n case ZETA_M_COMM: /* needs comm data */\n case 0: delvm = domain.delv_zeta(domain.lzetam(i)) ; break ;\n case ZETA_M_SYMM: delvm = domain.delv_zeta(i) ; break ;\n case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvm = 0; /* ERROR - but quiets the compiler */\n break;\n }\n switch (bcMask & ZETA_P) {\n case ZETA_P_COMM: /* needs comm data */\n case 0: delvp = domain.delv_zeta(domain.lzetap(i)) ; break ;\n case ZETA_P_SYMM: delvp = domain.delv_zeta(i) ; break ;\n case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;\n default: fprintf(stderr, \"Error in switch at %s line %d\\n\",\n __FILE__, __LINE__);\n delvp = 0; /* ERROR - but quiets the compiler */\n break;\n }\n\n delvm = delvm * norm ;\n delvp = delvp * norm ;\n\n phizeta = Real_t(.5) * ( delvm + delvp ) ;\n\n delvm *= monoq_limiter_mult ;\n delvp *= monoq_limiter_mult ;\n\n if ( delvm < phizeta ) phizeta = delvm ;\n if ( delvp < phizeta ) phizeta = delvp ;\n if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);\n if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;\n\n /* Remove length scale */\n\n if ( domain.vdov(i) > Real_t(0.) ) {\n qlin = Real_t(0.) ;\n qquad = Real_t(0.) ;\n }\n else {\n Real_t delvxxi = domain.delv_xi(i) * domain.delx_xi(i) ;\n Real_t delvxeta = domain.delv_eta(i) * domain.delx_eta(i) ;\n Real_t delvxzeta = domain.delv_zeta(i) * domain.delx_zeta(i) ;\n\n if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;\n if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;\n if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;\n\n Real_t rho = domain.elemMass(i) / (domain.volo(i) * vnew[i]) ;\n\n qlin = -qlc_monoq * rho *\n ( delvxxi * (Real_t(1.) - phixi) +\n delvxeta * (Real_t(1.) - phieta) +\n delvxzeta * (Real_t(1.) - phizeta) ) ;\n\n qquad = qqc_monoq * rho *\n ( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +\n delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +\n delvxzeta*delvxzeta * (Real_t(1.) 
- phizeta*phizeta) ) ;\n }\n\n domain.qq(i) = qquad ;\n domain.ql(i) = qlin ;\n } #pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length)", "context_chars": 100, "text": " Real_t p_cut, Real_t eosvmax,\n Index_t length, Index_t *regElemList)\n{\nfor (Index_t i = 0; i < length ; ++i) {\n Real_t c1s = Real_t(2.0)/Real_t(3.0) ;\n bvc[i] = c1s * (compression[i] + Real_t(1.));\n pbvc[i] = c1s;\n } #pragma omp parallel for firstprivate(length)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)", "context_chars": 100, "text": "t(2.0)/Real_t(3.0) ;\n bvc[i] = c1s * (compression[i] + Real_t(1.));\n pbvc[i] = c1s;\n }\n\nfor (Index_t i = 0 ; i < length ; ++i){\n Index_t elem = regElemList[i];\n \n p_new[i] = bvc[i] * e_old[i] ;\n\n if (FABS(p_new[i]) < p_cut )\n p_new[i] = Real_t(0.0) ;\n\n if ( vnewc[elem] >= eosvmax ) /* impossible condition here? */\n p_new[i] = Real_t(0.0) ;\n\n if (p_new[i] < pmin)\n p_new[i] = pmin ;\n } #pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, emin)", "context_chars": 100, "text": " Index_t length, Index_t *regElemList)\n{\n Real_t *pHalfStep = Allocate(length) ;\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])\n + Real_t(0.5) * work[i];\n\n if (e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, emin)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0)", "context_chars": 100, "text": "c, e_new, compHalfStep, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;\n\n if ( delvc[i] > Real_t(0.) ) {\n q_new[i] /* = qq_old[i] = ql_old[i] */ = Real_t(0.) ;\n }\n else {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;\n }\n\n e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]\n * ( Real_t(3.0)*(p_old[i] + q_old[i])\n - Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;\n } #pragma omp parallel for firstprivate(length, rho0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, emin, e_cut)", "context_chars": 100, "text": "eal_t(3.0)*(p_old[i] + q_old[i])\n - Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;\n }\n\nfor (Index_t i = 0 ; i < length ; ++i) {\n\n e_new[i] += Real_t(0.5) * work[i];\n\n if (FABS(e_new[i]) < e_cut) {\n e_new[i] = Real_t(0.) 
;\n }\n if ( e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, emin, e_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)", "context_chars": 100, "text": "vc, e_new, compression, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i){\n const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;\n Index_t elem = regElemList[i];\n Real_t q_tilde ;\n\n if (delvc[i] > Real_t(0.)) {\n q_tilde = Real_t(0.) ;\n }\n else {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_tilde = (ssc*ql_old[i] + qq_old[i]) ;\n }\n\n e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])\n - Real_t(8.0)*(pHalfStep[i] + q_new[i])\n + (p_new[i] + q_tilde)) * delvc[i]*sixth ;\n\n if (FABS(e_new[i]) < e_cut) {\n e_new[i] = Real_t(0.) ;\n }\n if ( e_new[i] < emin ) {\n e_new[i] = emin ;\n }\n } #pragma omp parallel for firstprivate(length, rho0, emin, e_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, rho0, q_cut)", "context_chars": 100, "text": "vc, e_new, compression, vnewc,\n pmin, p_cut, eosvmax, length, regElemList);\n\nfor (Index_t i = 0 ; i < length ; ++i){\n Index_t elem = regElemList[i];\n\n if ( delvc[i] <= Real_t(0.) ) {\n Real_t ssc = ( pbvc[i] * e_new[i]\n + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;\n\n if ( ssc <= Real_t(.1111111e-36) ) {\n ssc = Real_t(.3333333e-18) ;\n } else {\n ssc = SQRT(ssc) ;\n }\n\n q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;\n\n if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) 
;\n }\n } #pragma omp parallel for firstprivate(length, rho0, q_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(rho0, ss4o3)", "context_chars": 100, "text": " Real_t *bvc, Real_t ss4o3,\n Index_t len, Index_t *regElemList)\n{\nfor (Index_t i = 0; i < len ; ++i) {\n Index_t elem = regElemList[i];\n Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *\n bvc[i] * pnewc[i]) / rho0;\n if (ssTmp <= Real_t(.1111111e-36)) {\n ssTmp = Real_t(.3333333e-18);\n }\n else {\n ssTmp = SQRT(ssTmp);\n }\n domain.ss(elem) = ssTmp ;\n } #pragma omp parallel for firstprivate(rho0, ss4o3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(numElemReg)", "context_chars": 100, "text": " qq_old, ql_old, rho0, eosvmax,\n numElemReg, regElemList);\n }\n\nfor (Index_t i=0; i #pragma omp parallel for firstprivate(numElemReg)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc", "omp_pragma_line": "#pragma omp parallel for firstprivate(length, v_cut)", "context_chars": 100, "text": "ain, Real_t *vnew,\n Real_t v_cut, Index_t length)\n{\n if (length != 0) {\nfor(Index_t i=0 ; i #pragma omp parallel for firstprivate(length, v_cut)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/perform_element_loop.hpp", "omp_pragma_line": "#pragma omp parallel for shared (elemIDs)", "context_chars": 100, "text": " iter.x, iter.y, iter.z);\n }\n\n timer_type t_gn = 0, t_ce = 0, t_si = 0;\n timer_type t0 = 0;\n\n for(MINIFE_GLOBAL_ORDINAL i=0; i < elemIDs.size(); ++i) {\n ElemData elem_data;\n compute_gradient_values(elem_data.grad_vals);\n\n get_elem_nodes_and_coords(mesh, elemIDs[i], elem_data);\n compute_element_matrix_and_vector(elem_data);\n sum_into_global_linear_system(elem_data, A, b);\n } #pragma omp parallel for shared (elemIDs)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/generate_matrix_structure.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ffset_ptr = &row_offsets[0];\n MINIFE_LOCAL_ORDINAL* const row_coords_ptr = &row_coords[0];\n\n\tfor(int r = 0; r < r_n; ++r) {\n\t\tint iz = r / (xy_width) + box[2][0];\n\t\tint iy = (r / x_width) % y_width + box[1][0];\n\t\tint ix = r % x_width + box[0][0];\n\n \tGlobalOrdinal row_id =\n \tget_id(global_nodes_x, global_nodes_y, global_nodes_z,\n \tix, iy, iz);\n \trow_ptr[r] = mesh.map_id_to_row(row_id);\n \trow_coords_ptr[r*3] = ix;\n \trow_coords_ptr[r*3+1] = iy;\n row_coords_ptr[r*3+2] = iz;\n\n\t\t\tMINIFE_LOCAL_ORDINAL nnz = 0;\n for(int sz=-1; sz<=1; ++sz) {\n \tfor(int sy=-1; sy<=1; ++sy) {\n \tfor(int sx=-1; sx<=1; ++sx) {\n \tGlobalOrdinal col_id =\nget_id(global_nodes_x, global_nodes_y, global_nodes_z,\n\t ix+sx, iy+sy, iz+sz);\n\n \tif (col_id >= 0 && col_id < global_nrows) {\n \t++nnz;\n \t}\n \t}\n \t}\n \t}\n \trow_offset_ptr[r+1] = nnz;\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/exchange_externals.hpp", "omp_pragma_line": "#pragma omp parallel for", 
"context_chars": 100, "text": ".size();\n#ifdef MINIFE_DEBUG\n os << \"total_to_be_sent: \" << total_to_be_sent << std::endl;\n#endif\n\nfor(size_t i=0; i x.coefs.size()) {\n os << \"error, out-of-range. x.coefs.size()==\"< #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y.coefs[0];\n ScalarType* wcoefs = &w.coefs[0];\n\n if(beta == 0.0) {\n\tif(alpha == 1.0) {\n \t\tfor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "a omp parallel for\n \t\tfor(int i=0; ifor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "=0; ifor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \t\tfor(int i=0; ifor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " &x.coefs[0];\n MINIFE_SCALAR* ycoefs = &y.coefs[0];\n\n if(alpha == 1.0 && beta == 1.0) {\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += xcoefs[i];\n \t }\n } else if (beta == 1.0) {\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += alpha * xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt i = 0; i < n; ++i) {\n\t ycoefs[i] += alpha * xcoefs[i];\n \t }\n } else if (alpha == 1.0) {\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = xcoefs[i] + beta * ycoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i < n; ++i) {\n\t ycoefs[i] = xcoefs[i] + beta * ycoefs[i];\n \t }\n } else if (beta == 0.0) {\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rallel for\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i];\n \t }\n } else {\n\t for(int i = 0; i < 
n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i] + beta * ycoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:result)", "context_chars": 100, "text": "calar* xcoefs = &x.coefs[0];\n const Scalar* ycoefs = &y.coefs[0];\n MINIFE_SCALAR result = 0;\n\n for(int i=0; i #pragma omp parallel for reduction(+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:result)", "context_chars": 100, "text": "itude_type magnitude;\n\n const MINIFE_SCALAR* xcoefs = &x.coefs[0];\n MINIFE_SCALAR result = 0;\n\n for(int i=0; i #pragma omp parallel for reduction(+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/CSRMatrix.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " packed_cols.reserve(nrows * ncols_per_row);\n packed_coefs.reserve(nrows * ncols_per_row);\n\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows; ++i) {\n\trows[i] = 0;\n\trow_offsets[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/CSRMatrix.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows; ++i) {\n\trows[i] = 0;\n\trow_offsets[i] = 0;\n }\n\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < (nrows * ncols_per_row); ++i) {\n\tpacked_cols[i] = 0;\n\tpacked_coefs[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n zero_row_and_put_1_on_diagonal(A, row);\n }\n }\n\n const int ROW_COUNT = A.rows.size();\n\n for(MINIFE_GLOBAL_ORDINAL i=0; i < ROW_COUNT; ++i) {\n GlobalOrdinal row = A.rows[i];\n\n if (bc_rows.find(row) != bc_rows.end()) continue;\n\n size_t row_length = 0;\n GlobalOrdinal* cols = NULL;\n Scalar* coefs = NULL;\n A.get_row_pointers(row, row_length, cols, coefs);\n\n Scalar sum = 0;\n for(size_t j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = &y.coefs[0];\n const ScalarType beta = 0;\n\n for(MINIFE_GLOBAL_ORDINAL row = 0; row < rows_size; ++row) {\n const MINIFE_GLOBAL_ORDINAL row_start = Arowoffsets[row];\n const MINIFE_GLOBAL_ORDINAL row_end = Arowoffsets[row+1];\n\n MINIFE_SCALAR sum = 0;\n\n #pragma loop_count(15)\n for(MINIFE_GLOBAL_ORDINAL i = row_start; i < row_end; ++i) {\n sum += Acoefs[i] * xcoefs[Acols[i]];\n }\n\n ycoefs[row] = sum;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "alarType* xcoefs = &x.coefs[0];\n ScalarType* ycoefs = &y.coefs[0];\n ScalarType beta = 0;\n\n for(int row=0; row #pragma omp parallel for"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/main.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:value)", "context_chars": 100, "text": "hread Pool...\" << std::endl;\n }\n int value = 0;\n const int thread_count = omp_get_max_threads();\nfor(int i = 0; i < thread_count; ++i) {\n\tvalue += 1;\n } #pragma omp parallel for reduction(+:value)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "inal local_sz)\n : startIndex(startIdx),\n local_size(local_sz),\n coefs(local_size)\n {\n for(MINIFE_LOCAL_ORDINAL i=0; i < local_size; ++i) {\n\tcoefs[i] = 0;\t \n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/generator/graph_generator.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ");\n val1 *= UINT64_C(0xFFFFFFFF);\n val1 += mrg_get_uint_orig(&new_state);\n }\n\n#ifdef _OPENMP\n#ifdef __MTA__\n#pragma mta assert parallel\n#pragma mta block schedule\n\n for (ei = start_edge; ei < end_edge; ++ei) {\n mrg_state new_state = state;\n mrg_skip(&new_state, 0, ei, 0);\n make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);\n }\n ptrdiff_t i;\n#ifdef _OPENMP\nfor (i = 0; i < edge_count; ++i) {\n int64_t src = get_v0_from_edge(&actual_buf[i]);\n int64_t tgt = get_v1_from_edge(&actual_buf[i]);\n if (src == tgt) continue;\n if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {\n#ifdef _OPENMP\n#pragma omp atomic\n\n has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));\n }\n if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {\n#ifdef _OPENMP\n#pragma omp atomic\n\n has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_custom.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tex = vertex_p;\n int* restrict owner = owner_p;\n size_t* restrict local = local_p;\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)count; ++i) {\n owner[i] = VERTEX_OWNER(vertex[i]);\n local[i] = VERTEX_LOCAL(vertex[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_one_sided.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tex = vertex_p;\n int* restrict owner = owner_p;\n size_t* restrict local = local_p;\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)count; ++i) {\n owner[i] = VERTEX_OWNER(vertex[i]);\n local[i] = VERTEX_LOCAL(vertex[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csr.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 
size_t* restrict counts = temp;\n memset(counts, 0, nrows * sizeof(size_t));\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {\n assert ((size_t)(VERTEX_LOCAL(get_v0_from_edge(&inbuf[i]))) < nrows);\n#pragma omp atomic\n ++counts[VERTEX_LOCAL(get_v0_from_edge(&inbuf[i]))];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csr.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* restrict inserts = temp;\n memcpy(inserts, rowstarts, nrows * sizeof(size_t));\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {\n int64_t v0 = get_v0_from_edge(&inbuf[i]);\n int64_t v1 = get_v1_from_edge(&inbuf[i]);\n assert ((size_t)(VERTEX_LOCAL(v0)) < nrows);\n size_t pos = __sync_fetch_and_add(&inserts[VERTEX_LOCAL(v0)], 1);\n assert (pos < inbuf_size);\n column[pos] = v1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csr.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "starts, (a_nlocalverts + 1) * sizeof(size_t));\n b_nlocalverts = b->nlocalverts = a->nlocalverts;\nfor (i = old_b_nlocalverts; i < b_nlocalverts; ++i) {\n b->rowstarts[i + 1] = b_nlocaledges;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csr.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "eof(int64_t));\n }\n b_nlocaledges = b->nlocaledges = b_nlocaledges + a_nlocaledges;\n ptrdiff_t i;\nfor (i = 0; i <= a_nlocalverts; ++i) {\n b->rowstarts[i] += a->rowstarts[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csr.c", "omp_pragma_line": "#pragma omp parallel for if(a_nlocalverts != b_nlocalverts)", "context_chars": 100, "text": "p parallel for\n for (i = 0; i <= a_nlocalverts; ++i) {\n b->rowstarts[i] += a->rowstarts[i];\n }\nfor (i = a_nlocalverts + 1; i <= b_nlocalverts; ++i) {\n b->rowstarts[i] += a_nlocaledges;\n } #pragma omp parallel for if(a_nlocalverts != b_nlocalverts)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " size_t* restrict counts = temp;\n memset(counts, 0, nrows * sizeof(size_t));\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {\n assert ((size_t)(SWIZZLE_VERTEX(get_v1_from_edge(&inbuf[i])) / ULONG_BITS) < nrows);\n#pragma omp atomic\n ++counts[SWIZZLE_VERTEX(get_v1_from_edge(&inbuf[i])) / ULONG_BITS];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* restrict inserts = temp;\n memcpy(inserts, rowstarts, nrows * sizeof(size_t));\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {\n int64_t v0 = get_v0_from_edge(&inbuf[i]);\n int64_t v1 = SWIZZLE_VERTEX(get_v1_from_edge(&inbuf[i]));\n // fprintf(stderr, \"%d: Raw edge is (%\" PRId64 \", %\" PRId64 \") -> (%zu, %\" PRId64 \" = %\" PRId64 \")\\n\", rank, v0, 
get_v1_from_edge(&inbuf[i]), VERTEX_LOCAL(v0), v1, UNSWIZZLE_VERTEX(v1));\n size_t pos = __sync_fetch_and_add(&inserts[(v1) / ULONG_BITS], 1);\n column[pos] = (v1 % ULONG_BITS) + VERTEX_LOCAL(v0) * ULONG_BITS;\n // fprintf(stderr, \"%d: Stored as (row %\" PRId64 \", col %\" PRId64 \"/%\" PRId64 \")\\n\", rank, (v1) / ULONG_BITS, column[pos] % ULONG_BITS, column[pos] / ULONG_BITS);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ">rowstarts[i + 1] - b->rowstarts[i]) * sizeof(int64_t));\n }\n /* This loop can be parallel. */\nfor (i_plus_1 = nrows; i_plus_1 > 0; --i_plus_1) {\n ptrdiff_t i = i_plus_1 - 1;\n memcpy(&b->column[b->rowstarts[i + 1] + a->rowstarts[i]],\n &a->column[a->rowstarts[i]],\n (a->rowstarts[i + 1] - a->rowstarts[i]) * sizeof(int64_t));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/oned_csc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t64_t));\n }\n b_nlocaledges = b->nlocaledges = b_nlocaledges + a_nlocaledges;\n ptrdiff_t i;\nfor (i = 0; i <= nrows; ++i) {\n b->rowstarts[i] += a->rowstarts[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated_csc.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "e_summary, 0, local_queue_summary_size * sizeof(unsigned long));\n ptrdiff_t i, ii_summary;\n#if 0\nfor (i = 0; i < global_queue_summary_size; ++i) {\n unsigned long val = 0UL;\n int j;\n unsigned long mask = 1UL;\n for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {\n if (in_queue[i * ULONG_BITS + j]) val |= mask;\n }\n in_queue_summary[i] = val;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated_csc.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(|:not_done)", "context_chars": 100, "text": " val |= mask;\n }\n in_queue_summary[i] = val;\n }\n#endif\n unsigned long not_done = 0;\nfor (ii_summary = 0; ii_summary < global_queue_summary_size; ++ii_summary) {\n uint64_t val_summary = in_queue_summary[ii_summary];\n if (val_summary == 0) continue;\n int ii_offset;\n ptrdiff_t ii;\n for (ii_offset = 0; ii_offset < ULONG_BITS; ++ii_offset) {\n if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue;\n ii = ii_summary * ULONG_BITS + ii_offset;\n uint64_t val = in_queue[ii];\n if (val == 0) continue;\n size_t i, i_end = rowstarts[ii + 1];\n for (i = rowstarts[ii]; i < i_end; ++i) {\n int64_t c = column[i];\n int64_t v0_local = c / ULONG_BITS;\n if ((val & (UINT64_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local)) {\n assert (pred[v0_local] == -1);\n int64_t v1_swizzled = (int64_t)ii * ULONG_BITS + c % ULONG_BITS;\n pred[v0_local] = UNSWIZZLE_VERTEX(v1_swizzled);\n not_done |= 1;\n }\n }\n }\n } #pragma omp parallel for schedule(static) reduction(|:not_done)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated_csc.c", "omp_pragma_line": 
"#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "UNSWIZZLE_VERTEX(v1_swizzled);\n not_done |= 1;\n }\n }\n }\n }\n#if 1\nfor (i = 0; i < local_queue_summary_size; ++i) {\n unsigned long val = 0UL;\n int j;\n unsigned long mask = 1UL;\n for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {\n unsigned long full_val = out_queue[i * ULONG_BITS + j];\n visited[i * ULONG_BITS + j] |= full_val;\n if (full_val) val |= mask;\n }\n out_queue_summary[i] = val;\n // not_done |= val;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated_csc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tex = vertex_p;\n int* restrict owner = owner_p;\n size_t* restrict local = local_p;\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)count; ++i) {\n owner[i] = VERTEX_OWNER(vertex[i]);\n local[i] = VERTEX_LOCAL(vertex[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_simple.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tex = vertex_p;\n int* restrict owner = owner_p;\n size_t* restrict local = local_p;\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)count; ++i) {\n owner[i] = VERTEX_OWNER(vertex[i]);\n local[i] = VERTEX_LOCAL(vertex[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long));\n ptrdiff_t i, ii;\n#if 0\nfor (i = 0; i < global_queue_summary_size; ++i) {\n unsigned long val = 0UL;\n int j;\n unsigned long mask = 1UL;\n for (j = 0; j < ulong_bits; ++j, mask <<= 1) {\n if (in_queue[i * ulong_bits + j]) val |= mask;\n }\n in_queue_summary[i] = val;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(|:not_done)", "context_chars": 100, "text": " val |= mask;\n }\n in_queue_summary[i] = val;\n }\n#endif\n unsigned long not_done = 0;\nfor (ii = 0; ii < nlocalverts; ii += ulong_bits) {\n size_t i, i_end = ii + ulong_bits;\n if (i_end > nlocalverts) i_end = nlocalverts;\n for (i = ii; i < i_end; ++i) {\n if (!TEST_VISITED_LOCAL(i)) {\n size_t j, j_end = rowstarts[i + 1];\n for (j = rowstarts[i]; j < j_end; ++j) {\n int64_t v1 = column[j];\n int64_t v1_swizzled = SWIZZLE_VERTEX(v1);\n if (TEST_IN(v1_swizzled)) {\n pred[i] = (v1 & INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)cur_level << 48);\n not_done |= 1;\n SET_VISITED_LOCAL(i);\n break;\n }\n }\n }\n }\n } #pragma omp parallel for schedule(static) reduction(|:not_done)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " SET_VISITED_LOCAL(i);\n break;\n }\n }\n }\n }\n }\n#if 1\nfor (i = 0; i < local_queue_summary_size; ++i) {\n unsigned long val = 0UL;\n int j;\n unsigned long mask = 1UL;\n for (j = 0; j < 
ulong_bits; ++j, mask <<= 1) {\n unsigned long full_val = out_queue[i * ulong_bits + j];\n visited[i * ulong_bits + j] |= full_val;\n if (full_val) val |= mask;\n }\n out_queue_summary[i] = val;\n // not_done |= val;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/bfs_replicated.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tex = vertex_p;\n int* restrict owner = owner_p;\n size_t* restrict local = local_p;\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)count; ++i) {\n int64_t v = vertex[i];\n owner[i] = VERTEX_OWNER(v);\n local[i] = VERTEX_LOCAL(v);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(||:any_range_errors)", "context_chars": 100, "text": "& i_start <= (ptrdiff_t)nlocalverts);\n assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);\nfor (i = i_start; i < i_end; ++i) {\n int64_t p = get_pred_from_pred_entry(pred[i]);\n if (p < -1 || p >= nglobalverts) {\n fprintf(stderr, \"%d: Validation error: parent of vertex %\" PRId64 \" is out-of-range value %\" PRId64 \".\\n\", rank, vertex_to_global_for_pred(rank, i), p);\n any_range_errors = 1;\n }\n } #pragma omp parallel for reduction(||:any_range_errors)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (root_owner == rank);\n if (root_is_mine) assert (root_local < nlocalverts);\n\n {\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)nlocalverts; ++i) write_pred_entry_depth(&pred[i], UINT16_MAX);\n if (root_is_mine) write_pred_entry_depth(&pred[root_local], 0);\n }\n int64_t* restrict pred_pred = (int64_t*)xMPI_Alloc_mem(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Predecessor info of predecessor vertex for each local vertex */\n gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), pred_pred, size_min(CHUNKSIZE, nlocalverts), size_min(CHUNKSIZE, nlocalverts), MPI_INT64_T);\n int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */\n int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));\n size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));\n int iter_number = 0;\n {\n /* Iteratively update depth[v] = min(depth[v], depth[pred[v]] + 1) [saturating at UINT16_MAX] until no changes. 
*/\n while (1) {\n ++iter_number;\n int any_changes = 0;\n ptrdiff_t ii;\n for (ii = 0; ii < (ptrdiff_t)maxlocalverts; ii += CHUNKSIZE) {\n ptrdiff_t i_start = ptrdiff_min(ii, nlocalverts);\n ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);\n begin_gather(pred_win);\n ptrdiff_t i;\n assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);\n assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);\n#pragma omp parallel for\n for (i = i_start; i < i_end; ++i) {\n pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);\n }\n get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);\n#pragma omp parallel for\n for (i = i_start; i < i_end; ++i) {\n if (pred[i] != -1) {\n add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);\n } else {\n pred_pred[i - i_start] = -1;\n }\n }\n end_gather(pred_win);\n#pragma omp parallel for reduction(&&:validation_passed) reduction(||:any_changes)\n for (i = i_start; i < i_end; ++i) {\n if (rank == root_owner && (size_t)i == root_local) continue;\n if (get_depth_from_pred_entry(pred_pred[i - i_start]) != UINT16_MAX) {\n if (get_depth_from_pred_entry(pred[i]) != UINT16_MAX && get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {\n fprintf(stderr, \"%d: Validation error: BFS predecessors do not form a tree; see vertices %\" PRId64 \" (depth %\" PRIu16 \") and %\" PRId64 \" (depth %\" PRIu16 \").\\n\", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));\n validation_passed = 0;\n } else if (get_depth_from_pred_entry(pred[i]) == get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {\n /* Nothing to do */\n } else {\n write_pred_entry_depth(&pred[i], get_depth_from_pred_entry(pred_pred[i - i_start]) + 1);\n any_changes = 1;\n }\n }\n }\n }\n MPI_Allreduce(MPI_IN_PLACE, &any_changes, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);\n if (!any_changes) break;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i_start <= (ptrdiff_t)nlocalverts);\n assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);\nfor (i = i_start; i < i_end; ++i) {\n pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);\nfor (i = i_start; i < i_end; ++i) {\n if (pred[i] != -1) {\n add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);\n } else {\n pred_pred[i - i_start] = -1;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed) reduction(||:any_changes)", "context_chars": 100, "text": "else {\n pred_pred[i - i_start] = -1;\n }\n }\n end_gather(pred_win);\nfor (i = i_start; i < i_end; ++i) {\n if (rank == root_owner && (size_t)i == root_local) continue;\n 
if (get_depth_from_pred_entry(pred_pred[i - i_start]) != UINT16_MAX) {\n if (get_depth_from_pred_entry(pred[i]) != UINT16_MAX && get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {\n fprintf(stderr, \"%d: Validation error: BFS predecessors do not form a tree; see vertices %\" PRId64 \" (depth %\" PRIu16 \") and %\" PRId64 \" (depth %\" PRIu16 \").\\n\", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));\n validation_passed = 0;\n } else if (get_depth_from_pred_entry(pred[i]) == get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {\n /* Nothing to do */\n } else {\n write_pred_entry_depth(&pred[i], get_depth_from_pred_entry(pred_pred[i - i_start]) + 1);\n any_changes = 1;\n }\n }\n } #pragma omp parallel for reduction(&&:validation_passed) reduction(||:any_changes)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed)", "context_chars": 100, "text": "0.\\n\", rank, root, get_depth_from_pred_entry(pred[root_local]));\n validation_passed = 0;\n }\nfor (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {\n if (get_pred_from_pred_entry(pred[i]) == -1 &&\n get_depth_from_pred_entry(pred[i]) != UINT16_MAX) {\n fprintf(stderr, \"%d: Validation error: depth of vertex %\" PRId64 \" with no predecessor is %\" PRIu16 \", not UINT16_MAX.\\n\", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));\n validation_passed = 0;\n } else if (get_pred_from_pred_entry(pred[i]) != -1 &&\n get_depth_from_pred_entry(pred[i]) == UINT16_MAX) {\n fprintf(stderr, \"%d: Validation error: predecessor of claimed unreachable vertex %\" PRId64 \" is %\" PRId64 \", not -1.\\n\", rank, vertex_to_global_for_pred(rank, i), get_pred_from_pred_entry(pred[i]));\n validation_passed = 0;\n }\n } #pragma omp parallel for reduction(&&:validation_passed)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " assert (i_end - i_start >= 0 && i_end - i_start <= (ptrdiff_t)size_min(CHUNKSIZE, nlocalverts));\nfor (i = i_start; i < i_end; ++i) {\n pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i]);\n }\n get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);\nfor (i = i_start; i < i_end; ++i) {\n if (pred[i] != -1) {\n add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);\n } else {\n pred_pred[i - i_start] = -1;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed)", "context_chars": 100, "text": "start);\n } else {\n pred_pred[i - i_start] = -1;\n }\n }\n end_gather(pred_win);\nfor (i = i_start; i < i_end; ++i) {\n if (rank == root_owner && (size_t)i == root_local) continue;\n 
if (get_pred_from_pred_entry(pred[i]) == -1) continue; /* Already checked */\n if (get_depth_from_pred_entry(pred_pred[i - i_start]) == UINT16_MAX) {\n fprintf(stderr, \"%d: Validation error: predecessor %\" PRId64 \" of vertex %\" PRId64 \" (depth %\" PRIu16 \") is marked as unreachable.\\n\", rank, get_pred_from_pred_entry(pred[i]), vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));\n validation_passed = 0;\n }\n if (get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {\n fprintf(stderr, \"%d: Validation error: BFS predecessors do not form a tree; see vertices %\" PRId64 \" (depth %\" PRIu16 \") and %\" PRId64 \" (depth %\" PRIu16 \").\\n\", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));\n validation_passed = 0;\n }\n } #pragma omp parallel for reduction(&&:validation_passed)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "& i_start <= (ptrdiff_t)nlocalverts);\n assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);\nfor (i = i_start; i < i_end; ++i) {\n pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed)", "context_chars": 100, "text": "\n }\n get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);\nfor (i = i_start; i < i_end; ++i) {\n if ((!root_is_mine || (size_t)i != root_local) &&\n get_pred_from_pred_entry(pred[i]) != -1 &&\n pred_owner[i - i_start] == rank &&\n pred_local[i - i_start] == (size_t)i) {\n fprintf(stderr, \"%d: Validation error: parent of non-root vertex %\" PRId64 \" is itself.\\n\", rank, vertex_to_global_for_pred(rank, i));\n validation_passed = 0;\n }\n } #pragma omp parallel for reduction(&&:validation_passed)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "HALF_CHUNKSIZE, bufsize);\n assert (i_end - i_start <= edge_chunk_size);\n ptrdiff_t i;\nfor (i = i_start; i < i_end; ++i) {\n int64_t v0 = get_v0_from_edge(&buf[i]);\n int64_t v1 = get_v1_from_edge(&buf[i]);\n edge_endpoint[(i - i_start) * 2 + 0] = v0;\n edge_endpoint[(i - i_start) * 2 + 1] = v1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pred(2 * (i_end - i_start), edge_endpoint, edge_owner, edge_local);\n begin_gather(pred_win);\nfor (i = i_start; i < i_end; ++i) {\n add_gather_request(pred_win, (i - i_start) * 2 + 0, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);\n add_gather_request(pred_win, (i - i_start) * 2 + 1, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed) reduction(+:edge_visit_count)", "context_chars": 100, "text": ") * 2 + 1);\n }\n end_gather(pred_win);\n begin_scatter_constant(pred_valid_win);\nfor (i = i_start; i < i_end; ++i) {\n int64_t src = get_v0_from_edge(&buf[i]);\n int64_t tgt = get_v1_from_edge(&buf[i]);\n uint16_t src_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]);\n uint16_t tgt_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]);\n if (src_depth != UINT16_MAX && tgt_depth == UINT16_MAX) {\n fprintf(stderr, \"%d: Validation error: edge connects vertex %\" PRId64 \" in the BFS tree (depth %\" PRIu16 \") to vertex %\" PRId64 \" outside the tree.\\n\", rank, src, src_depth, tgt);\n validation_passed = 0;\n } else if (src_depth == UINT16_MAX && tgt_depth != UINT16_MAX) {\n fprintf(stderr, \"%d: Validation error: edge connects vertex %\" PRId64 \" in the BFS tree (depth %\" PRIu16 \") to vertex %\" PRId64 \" outside the tree.\\n\", rank, tgt, tgt_depth, src);\n validation_passed = 0;\n } else if (src_depth - tgt_depth < -1 ||\n src_depth - tgt_depth > 1) {\n fprintf(stderr, \"%d: Validation error: depths of edge endpoints %\" PRId64 \" (depth %\" PRIu16 \") and %\" PRId64 \" (depth %\" PRIu16 \") are too far apart (abs. val. > 1).\\n\", rank, src, src_depth, tgt, tgt_depth);\n validation_passed = 0;\n } else if (src_depth != UINT16_MAX) {\n ++edge_visit_count;\n }\n if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]) == tgt) {\n add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);\n }\n if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]) == src) {\n add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);\n }\n } #pragma omp parallel for reduction(&&:validation_passed) reduction(+:edge_visit_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/graph500-2.1.4/mpi/validate.c", "omp_pragma_line": "#pragma omp parallel for reduction(&&:validation_passed)", "context_chars": 100, "text": "dge_local);\n free(edge_endpoint);\n destroy_scatter_constant(pred_valid_win);\n ptrdiff_t i;\nfor (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {\n int64_t p = get_pred_from_pred_entry(pred[i]);\n if (p == -1) continue;\n int found_pred_edge = pred_valid[i];\n if (root_owner == rank && root_local == (size_t)i) found_pred_edge = 1; /* Root vertex */\n if (!found_pred_edge) {\n int64_t v = vertex_to_global_for_pred(rank, i);\n fprintf(stderr, \"%d: Validation error: no graph edge from vertex %\" PRId64 \" to its parent %\" PRId64 \".\\n\", rank, v, get_pred_from_pred_entry(pred[i]));\n validation_passed = 0;\n }\n } #pragma omp parallel for reduction(&&:validation_passed)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)", "context_chars": 100, "text": "bit_and=1;\n\tbit_or=0;\n\texclusiv_bit_or=0;\n\n\tknown_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;\n\nfor (i=1;i<=LOOPCOUNT;i++)\n\t{\n\t\tsum=sum+i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) 
reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)", "context_chars": 100, "text": " integers: Result was %d instead of %d\\n\",sum,known_sum); \n\t}\n\n\tdiff = (LOOPCOUNT*(LOOPCOUNT+1))/2;\nfor (i=1;i<=LOOPCOUNT;++i)\n\t{\n\t\tdiff=diff-i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)", "context_chars": 100, "text": "s */\n\tdsum=0;\n\tdpt=1;\n\tfor (i=0;ifor (i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)", "context_chars": 100, "text": ";\n\n\tfor (i=0;ifor (i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)", "context_chars": 100, "text": "+;\n\t\tfprintf(logFile,\"Error in Difference with doubles: Result was %E instead of 0.0\\n\",ddiff);\n\t}\n\nfor(i=1;i<=MAX_FACTOR;i++)\n\t{\n\t\tproduct *= i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)", "context_chars": 100, "text": "s %d instead of %d\\n\\n\",product,known_product);\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)", "context_chars": 100, "text": "++;\n\t\tfprintf(logFile,\"Error in logic AND part 1.\\n\");\n\t}\n\n\tlogic_and = 1;\n\tlogics[LOOPCOUNT/2]=0;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)", "context_chars": 100, "text": "printf(logFile,\"Error in logic AND part 2.\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)", "context_chars": 100, "text": "ult++;\n\t\tfprintf(logFile,\"Error in logic OR part 1.\\n\");\n\t}\n\tlogic_or = 
0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)", "context_chars": 100, "text": "printf(logFile,\"Error in logic OR part 2.\\n\");\n\t}\n\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)", "context_chars": 100, "text": "sult++;\n\t\tfprintf(logFile,\"Error in BIT AND part 1.\\n\");\n\t}\n\n\tbit_and = 1;\n\tlogics[LOOPCOUNT/2]=0;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)", "context_chars": 100, "text": "\tfprintf(logFile,\"Error in BIT AND part 2.\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)", "context_chars": 100, "text": "\t\tresult++;\n\t\tfprintf(logFile,\"Error in BIT OR part 1\\n\");\n\t}\n\tbit_or = 0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)", "context_chars": 100, "text": "\n\t\tfprintf(logFile,\"Error in BIT OR part 2\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)", "context_chars": 100, "text": "tf(logFile,\"Error in EXCLUSIV BIT OR part 1\\n\");\n\t}\n\n\texclusiv_bit_or = 0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_private.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) schedule(static,1) private(i) private(i2)", "context_chars": 100, "text": "m;\n int i;\n int i2;\n \n \n sum =0;\n i2=0;\n int known_sum;\n \n\n \n for (i=1;i<=LOOPCOUNT;i++)\n {\n\ti2 = i;\n#pragma omp flush\n\tdo_some_work ();\n#pragma omp flush\n\tsum = sum + i2;\n } #pragma omp parallel for reduction(+:sum) schedule(static,1) private(i) private(i2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_ordered.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) private(i) ordered", "context_chars": 100, "text": " is_larger;\n\t\n\n\tint known_sum;\n\tint i;\n\t\n\tsum = 0;\n\tis_larger = 1;\n\tlast_i = 0;\nfor (i = 1; i < 100; i++)\n\t{\n\t\tii = i;\n\t\n#pragma omp ordered\n\t\t{\n\t\t\tis_larger = check_i_islarger2 (ii) && is_larger;\n\t\t\tsum = sum + ii;\n\t\t}\n\t\n\t} #pragma omp parallel for schedule(static,1) private(i) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)", "context_chars": 100, "text": "lusiv_bit_or=0;\n result=0;\n\tdt = 1./3.;\n\tknown_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;\n\nfor (i=1;i<=LOOPCOUNT;i++)\n\t{\n\t\tsum=sum+i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)", "context_chars": 100, "text": " integers: Result was %d instead of %d\\n\",sum,known_sum); \n\t}\n\n\tdiff = (LOOPCOUNT*(LOOPCOUNT+1))/2;\nfor (i=1;i<=LOOPCOUNT;++i)\n\t{\n\t\tdiff=diff-i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)", "context_chars": 100, "text": "s */\n\tdsum=0;\n\tdpt=1;\n\tfor (i=0;ifor (i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)", "context_chars": 100, "text": ";\n\n\tfor (i=0;ifor (i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)", "context_chars": 100, "text": "+;\n\t\tfprintf(logFile,\"Error in Difference with doubles: Result was %E instead of 0.0\\n\",ddiff);\n\t}\n\nfor(i=1;i<=MAX_FACTOR;i++)\n\t{\n\t\tproduct *= i;\n\t} #pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)", "context_chars": 100, "text": "s %d instead of %d\\n\\n\",product,known_product);\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)", "context_chars": 100, "text": 
"++;\n\t\tfprintf(logFile,\"Error in logic AND part 1.\\n\");\n\t}\n\n\tlogic_and = 1;\n\tlogics[LOOPCOUNT/2]=0;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)", "context_chars": 100, "text": "printf(logFile,\"Error in logic AND part 2.\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)", "context_chars": 100, "text": "ult++;\n\t\tfprintf(logFile,\"Error in logic OR part 1.\\n\");\n\t}\n\tlogic_or = 0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)", "context_chars": 100, "text": "printf(logFile,\"Error in logic OR part 2.\\n\");\n\t}\n\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)", "context_chars": 100, "text": "sult++;\n\t\tfprintf(logFile,\"Error in BIT AND part 1.\\n\");\n\t}\n\n\tbit_and = 1;\n\tlogics[LOOPCOUNT/2]=0;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)", "context_chars": 100, "text": "\tfprintf(logFile,\"Error in BIT AND part 2.\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)", "context_chars": 100, "text": "\t\tresult++;\n\t\tfprintf(logFile,\"Error in BIT OR part 1\\n\");\n\t}\n\tbit_or = 0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)", "context_chars": 100, "text": "\n\t\tfprintf(logFile,\"Error in BIT OR part 2\\n\");\n\t}\n\n\tfor(i=0;ifor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_reduction.c", "omp_pragma_line": "#pragma omp parallel 
for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)", "context_chars": 100, "text": "tf(logFile,\"Error in EXCLUSIV BIT OR part 1\\n\");\n\t}\n\n\texclusiv_bit_or = 0;\n\tlogics[LOOPCOUNT/2]=1;\n\nfor(i=0;i #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_lastprivate.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) schedule(static,7) private(i) lastprivate(i0)private(i0)", "context_chars": 100, "text": " sum;\n int i;\n int i0;\n \n\n sum =0;\n i0 = -1;\n int known_sum;\n\n\n for (i = 1; i <= LOOPCOUNT; i++)\n {\n\tsum = sum + i;\n\ti0 = i;\n } #pragma omp parallel for reduction(+:sum) schedule(static,7) private(i) lastprivate(i0)private(i0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_if.c", "omp_pragma_line": "#pragma omp parallel for private(i) if (control==1)", "context_chars": 100, "text": "t control;\n \n control = 0;\n num_threads=0;\n sum = 0;\n sum2 = 0;\n\n\n for (i=0; i <= LOOPCOUNT; i++)\n {\n num_threads = omp_get_num_threads();\n\tsum = sum + i;\n } #pragma omp parallel for private(i) if (control==1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/OpenUH/c/omp_parallel_for_firstprivate.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) private(i) firstprivate(i2)private(i2)", "context_chars": 100, "text": ";\n int i2;\n int i;\n \n \n sum=0;\n i2=3;\n int known_sum;\n\n \n for (i = 1; i <= LOOPCOUNT; i++)\n {\n\t sum = sum + (i + i2);\n } #pragma omp parallel for reduction(+:sum) private(i) firstprivate(i2)private(i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/EPCC/C/syncbench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "aylength);\n\t }\n\t}\n }\n}\n\nvoid testpfor() {\n int i, j;\n for (j = 0; j < innerreps; j++) {\nfor (i = 0; i < nthreads; i++) {\n\t delay(delaylength);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/EPCC/C/syncbench.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static,1)", "context_chars": 100, "text": "ck);\n\t delay(delaylength);\n\t omp_unset_lock(&lock);\n\t}\n }\n}\n\nvoid testorder() {\n int j;\nfor (j = 0; j < (int)innerreps; j++) {\n#pragma omp ordered\n\tdelay(delaylength);\n } #pragma omp parallel for ordered schedule (static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module two's work using \"omp parallel for schedule(static)\"\n * (contains 2 subcycles) \n */\nvoid static_omp_module2()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) 
schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module three's work using \"omp parallel for schedule(static)\"\n * (contains 3 subcycles) \n */\nvoid static_omp_module3()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module three's work using \"omp parallel for schedule(static)\"\n * (contains 3 subcycles) \n */\nvoid static_omp_module3()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add 
appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(static)\"\n * (contains 4 subcycles) \n */\nvoid static_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], 
deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(static)\"\n * (contains 4 subcycles) \n */\nvoid static_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(static)\"\n * 
(contains 4 subcycles) \n */\nvoid static_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(static)\" */\nvoid static_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n static_omp_module1();\n static_omp_module2();\n static_omp_module3();\n 
static_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(static)\" */\nvoid static_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n static_omp_module1();\n static_omp_module2();\n static_omp_module3();\n static_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(static)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(static)\" */\nvoid static_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n static_omp_module1();\n static_omp_module2();\n static_omp_module3();\n static_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(static)\" */\nvoid static_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n static_omp_module1();\n static_omp_module2();\n static_omp_module3();\n static_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(static)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module two's work using \"omp parallel for schedule(dynamic)\"\n * (contains 2 subcycles) \n */\nvoid dynamic_omp_module2()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 2 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module three's work using \"omp parallel for schedule(dynamic)\"\n * (contains 3 subcycles) \n */\nvoid dynamic_omp_module3()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part 
(partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module three's work using \"omp parallel for schedule(dynamic)\"\n * (contains 3 subcycles) \n */\nvoid dynamic_omp_module3()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(dynamic)\"\n * (contains 4 subcycles) \n */\nvoid dynamic_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private 
(pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 3 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(dynamic)\"\n * (contains 4 subcycles) \n */\nvoid dynamic_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- 
*/\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n/* Do module four's work using \"omp parallel for schedule(dynamic)\"\n * (contains 4 subcycles) \n */\nvoid dynamic_omp_module4()\n{\n double deposit;\n long pidx;\n\n /* ---------------- SUBCYCLE 1 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 2 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n 
deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(dynamic)\" */\nvoid dynamic_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n dynamic_omp_module1();\n dynamic_omp_module2();\n dynamic_omp_module3();\n dynamic_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 3 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(dynamic)\" */\nvoid dynamic_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n dynamic_omp_module1();\n dynamic_omp_module2();\n dynamic_omp_module3();\n dynamic_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n\n /* ---------------- SUBCYCLE 4 OF 4 ----------------- */\n\n /* Calculate deposit for this subcycle based on last subcycle's residue */\n deposit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\n#pragma omp parallel for private (pidx) schedule(dynamic)\n for (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(dynamic)\" */\nvoid dynamic_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n dynamic_omp_module1();\n 
dynamic_omp_module2();\n dynamic_omp_module3();\n dynamic_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)", "context_chars": 100, "text": "posit = calc_deposit ();\n \n /* Scan through zones and add appropriate deposit to each zone */\nfor (pidx = 0; pidx < CLOMP_numParts; pidx++)\n\tupdate_part (partArray[pidx], deposit);\n}\n\n\n/* Do one cycle (10 subcycles) using \"omp parallel for schedule(dynamic)\" */\nvoid dynamic_omp_cycle()\n{\n /* Emulate calls to 4 different packages, do 10 subcycles total */\n dynamic_omp_module1();\n dynamic_omp_module2();\n dynamic_omp_module3();\n dynamic_omp_module4();\n} #pragma omp parallel for private (pidx) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private(partId) schedule(static) ", "context_chars": 100, "text": " may be set to 1 for allocate)\n * to allow potentially better memory layout for threads\n */\nfor (partId = 0; partId < CLOMP_numParts; partId++)\n {\n\tPart *part;\n\tif ((part= (Part *) malloc (sizeof (Part))) == NULL)\n\t{\n\t fprintf (stderr, \"Out of memory allocating part\\n\");\n\t exit (1);\n\t}\n\n\t/* Call standard part initializer for part just allocated.\n\t * Allows parts to be allocated as desired.\n\t */\n\taddPart(part, partId);\n } #pragma omp parallel for private(partId) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private(partId) schedule(static) ", "context_chars": 100, "text": "rt just allocated.\n\t * Allows parts to be allocated as desired.\n\t */\n\taddPart(part, partId);\n }\n\n/* Create and add zones to parts.\n * Do allocations in thread (allocThreads may be set to 1 for allocate)\n * to allow potentially better memory layout for threads\n */\n for (partId = 0; partId < CLOMP_numParts; partId++)\n {\n\tZone *zoneArray, *zone;\n\tint zoneId;\n\t\n\t/* Allocate an array of zones for this part */\n\tzoneArray = (Zone *)malloc (CLOMP_zoneSize * CLOMP_zonesPerPart);\n\tif (zoneArray == NULL)\n\t{\n\t fprintf (stderr, \"Out of memory allocate zone array\\n\");\n\t exit (1);\n\t}\n\t\n\t/* Put all zones into part's zone linked list */\n\tfor (zoneId = 0; zoneId < CLOMP_zonesPerPart; zoneId++)\n\t{\n\t /* Get the current zone being placed */\n\t zone = &zoneArray[zoneId];\n\t \n\t /* Add it to the end of the the part */\n\t addZone (partArray[partId], zone);\n\t}\n\t\n#if 0\n\t/* Print out memory address for zoneArray to see where it maps */\n\tprintf (\"Part %i threadId %i: zones %p - %p\\n\", (int)partId,\n\t\tomp_get_thread_num(), zoneArray, &zoneArray[CLOMP_zonesPerPart-1]);\n\n } #pragma omp parallel for private(partId) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(static)\");", "context_chars": 100, "text": "print_pseudocode (\"Static OMP\", \"deposit = calc_deposit ();\");\n print_pseudocode (\"Static OMP\", \"print_pseudocode (\"Static OMP\", \"for (pidx = 0; pidx lt numParts; pidx++)\");\n print_pseudocode (\"Static OMP\", \" update_part 
(partArray[pidx], deposit);\");\n print_pseudocode (\"Static OMP\", \"------- End Static OMP Pseudocode -------\");\n print_start_message (\"Static OMP\");\n#ifdef WITH_MPI\n /* Ensure all MPI tasks run OpenMP at the same time */\n MPI_Barrier (MPI_COMM_WORLD);\n\n get_timestamp (&static_omp_start_ts);\n do_static_omp_version();\n get_timestamp (&static_omp_end_ts);\n\n /* Check data for consistency and print out data stats*/\n print_data_stats (\"Static OMP\");\n\n /* Print out serial time stats and capture time.\n * Also print speedup compared to serial run time.\n */\n static_omp_seconds = print_timestats (\"Static OMP\", \n\t\t\t\t\t &static_omp_start_ts, \n\t\t\t\t\t &static_omp_end_ts,\n\t\t\t\t\t serial_ref_seconds,\n\t\t\t\t\t bestcase_omp_seconds);\n\n /* --------- Start Dynamic OMP benchmark measurement --------- */\n\n /* Do one cycle outside timer loop to warm up code and (for OpenMP cases)\n * allow the OpenMP system to initialize (which can be expensive, skewing\n * the measurments for small runtimes */\n reinitialize_parts();\n dynamic_omp_cycle();\n\n /* Reinitialize parts and warm up cache by doing dummy update */\n reinitialize_parts();\n\n /* Do the OMP Dynamic OMP version of calculation and measure time*/\n print_pseudocode (\"Dynamic OMP\", \"------ Start Dynamic OMP Pseudocode ------\");\n print_pseudocode (\"Dynamic OMP\", \"/* Use OpenMP parallel for schedule(dynamic) on orig loop. */\");\n print_pseudocode (\"Dynamic OMP\", \"deposit = calc_deposit ();\");\n print_pseudocode (\"Dynamic OMP\", \"#pragma omp parallel for private (pidx) schedule(dynamic)\");\n print_pseudocode (\"Dynamic OMP\", \"for (pidx = 0; pidx lt numParts; pidx++)\");\n print_pseudocode (\"Dynamic OMP\", \" update_part (partArray[pidx], deposit);\");\n print_pseudocode (\"Dynamic OMP\", \"------- End Dynamic OMP Pseudocode -------\");\n print_start_message (\"Dynamic OMP\");\n#ifdef WITH_MPI\n /* Ensure all MPI tasks run OpenMP at the same time */\n MPI_Barrier (MPI_COMM_WORLD);\n\n get_timestamp (&dynamic_omp_start_ts);\n do_dynamic_omp_version();\n get_timestamp (&dynamic_omp_end_ts);\n\n /* Check data for consistency and print out data stats*/\n print_data_stats (\"Dynamic OMP\");\n\n /* Print out serial time stats and capture time.\n * Also print speedup compared to serial run time.\n */\n dynamic_omp_seconds = print_timestats (\"Dynamic OMP\", \n\t\t\t\t\t &dynamic_omp_start_ts, \n\t\t\t\t\t &dynamic_omp_end_ts,\n\t\t\t\t\t serial_ref_seconds,\n\t\t\t\t\t bestcase_omp_seconds);\n\n\n /* --------- Start Manual OMP benchmark measurement --------- */\n\n /* Do one cycle outside timer loop to warm up code and (for OpenMP cases)\n * allow the OpenMP system to initialize (which can be expensive, skewing\n * the measurments for small runtimes */\n reinitialize_parts();\n do_manual_omp_version(1);\n\n /* Reinitialize parts and warm up cache by doing dummy update */\n reinitialize_parts();\n\n /* Do the OMP Manual OMP version of calculation and measure time*/\n print_pseudocode (\"Manual OMP\", \"------ Start Manual OMP Pseudocode ------\");\n print_pseudocode (\"Manual OMP\", \"/* At top level, spawn threads and manually partition parts*/\");\n print_pseudocode (\"Manual OMP\", \"#pragma omp parallel\");\n print_pseudocode (\"Manual OMP\", \"{\");\n print_pseudocode (\"Manual OMP\", \" int startPidx = ... 
/* slice based on thread_id*/\");\n print_pseudocode (\"Manual OMP\", \" for (iter = 0; iter lt num_iterations; iter++) \");\n print_pseudocode (\"Manual OMP\", \" do_iter(startPidx, endPidx);\");\n print_pseudocode (\"Manual OMP\", \"} #pragma omp parallel for private (pidx) schedule(static)\");"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c", "omp_pragma_line": "#pragma omp parallel for private (pidx) schedule(dynamic)\");", "context_chars": 100, "text": "int_pseudocode (\"Dynamic OMP\", \"deposit = calc_deposit ();\");\n print_pseudocode (\"Dynamic OMP\", \"print_pseudocode (\"Dynamic OMP\", \"for (pidx = 0; pidx lt numParts; pidx++)\");\n print_pseudocode (\"Dynamic OMP\", \" update_part (partArray[pidx], deposit);\");\n print_pseudocode (\"Dynamic OMP\", \"------- End Dynamic OMP Pseudocode -------\");\n print_start_message (\"Dynamic OMP\");\n#ifdef WITH_MPI\n /* Ensure all MPI tasks run OpenMP at the same time */\n MPI_Barrier (MPI_COMM_WORLD);\n\n get_timestamp (&dynamic_omp_start_ts);\n do_dynamic_omp_version();\n get_timestamp (&dynamic_omp_end_ts);\n\n /* Check data for consistency and print out data stats*/\n print_data_stats (\"Dynamic OMP\");\n\n /* Print out serial time stats and capture time.\n * Also print speedup compared to serial run time.\n */\n dynamic_omp_seconds = print_timestats (\"Dynamic OMP\", \n\t\t\t\t\t &dynamic_omp_start_ts, \n\t\t\t\t\t &dynamic_omp_end_ts,\n\t\t\t\t\t serial_ref_seconds,\n\t\t\t\t\t bestcase_omp_seconds);\n\n\n /* --------- Start Manual OMP benchmark measurement --------- */\n\n /* Do one cycle outside timer loop to warm up code and (for OpenMP cases)\n * allow the OpenMP system to initialize (which can be expensive, skewing\n * the measurments for small runtimes */\n reinitialize_parts();\n do_manual_omp_version(1);\n\n /* Reinitialize parts and warm up cache by doing dummy update */\n reinitialize_parts();\n\n /* Do the OMP Manual OMP version of calculation and measure time*/\n print_pseudocode (\"Manual OMP\", \"------ Start Manual OMP Pseudocode ------\");\n print_pseudocode (\"Manual OMP\", \"/* At top level, spawn threads and manually partition parts*/\");\n print_pseudocode (\"Manual OMP\", \"#pragma omp parallel\");\n print_pseudocode (\"Manual OMP\", \"{\");\n print_pseudocode (\"Manual OMP\", \" int startPidx = ... 
/* slice based on thread_id*/\");\n print_pseudocode (\"Manual OMP\", \" for (iter = 0; iter lt num_iterations; iter++) \");\n print_pseudocode (\"Manual OMP\", \" do_iter(startPidx, endPidx);\");\n print_pseudocode (\"Manual OMP\", \"} #pragma omp parallel for private (pidx) schedule(dynamic)\");"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) ordered", "context_chars": 100, "text": " Schedule static\\n#################################################\\n\");\n cnt_static = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static (j, \"static\");\n }\n } #pragma omp parallel for schedule(static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_static_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static_chunked (j, \"static chunked\");\n }\n } #pragma omp parallel for schedule(static, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) ordered", "context_chars": 100, "text": "chedule dynamic\\n#################################################\\n\");\n cnt_dynamic = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic (j, \"dynamic\");\n }\n } #pragma omp parallel for schedule(dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, cs) ordered", "context_chars": 100, "text": "%d)\\n#################################################\\n\", cs);\n cnt_dynamic_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic_chunked (j, \"dynamic chunked\");\n }\n } #pragma omp parallel for schedule(dynamic, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) ordered", "context_chars": 100, "text": " Schedule guided\\n#################################################\\n\");\n cnt_guided = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided (j, \"guided\");\n }\n } #pragma omp parallel for schedule(guided) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_guided_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided_chunked (j, \"guided chunked\");\n }\n } #pragma omp parallel for schedule(guided, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(auto) ordered", "context_chars": 100, "text": "ered Schedule 
auto\\n#################################################\\n\");\n cnt_auto = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_auto (j, \"auto\");\n }\n } #pragma omp parallel for schedule(auto) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) ordered", "context_chars": 100, "text": "chedule runtime\\n#################################################\\n\");\n cnt_runtime = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_runtime (j, \"runtime\");\n }\n } #pragma omp parallel for schedule(runtime) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) ordered", "context_chars": 100, "text": " Schedule static\\n#################################################\\n\");\n cnt_static = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static (j, \"static\");\n }\n } #pragma omp parallel for schedule(static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_static_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static_chunked (j, \"static chunked\");\n }\n } #pragma omp parallel for schedule(static, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) ordered", "context_chars": 100, "text": "chedule dynamic\\n#################################################\\n\");\n cnt_dynamic = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic (j, \"dynamic\");\n }\n } #pragma omp parallel for schedule(dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, cs) ordered", "context_chars": 100, "text": "%d)\\n#################################################\\n\", cs);\n cnt_dynamic_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic_chunked (j, \"dynamic chunked\");\n }\n } #pragma omp parallel for schedule(dynamic, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) ordered", "context_chars": 100, "text": " Schedule guided\\n#################################################\\n\");\n cnt_guided = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided (j, \"guided\");\n }\n } #pragma omp parallel for schedule(guided) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_guided_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n 
{\n check_guided_chunked (j, \"guided chunked\");\n }\n } #pragma omp parallel for schedule(guided, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(auto) ordered", "context_chars": 100, "text": "ered Schedule auto\\n#################################################\\n\");\n cnt_auto = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_auto (j, \"auto\");\n }\n } #pragma omp parallel for schedule(auto) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) ordered", "context_chars": 100, "text": "chedule runtime\\n#################################################\\n\");\n cnt_runtime = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_runtime (j, \"runtime\");\n }\n } #pragma omp parallel for schedule(runtime) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) ordered", "context_chars": 100, "text": " Schedule static\\n#################################################\\n\");\n cnt_static = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static (j, \"static\");\n }\n } #pragma omp parallel for schedule(static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_static_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static_chunked (j, \"static chunked\");\n }\n } #pragma omp parallel for schedule(static, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) ordered", "context_chars": 100, "text": "chedule dynamic\\n#################################################\\n\");\n cnt_dynamic = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic (j, \"dynamic\");\n }\n } #pragma omp parallel for schedule(dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, cs) ordered", "context_chars": 100, "text": "%d)\\n#################################################\\n\", cs);\n cnt_dynamic_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic_chunked (j, \"dynamic chunked\");\n }\n } #pragma omp parallel for schedule(dynamic, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) ordered", "context_chars": 100, "text": " Schedule guided\\n#################################################\\n\");\n cnt_guided = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided (j, \"guided\");\n }\n } #pragma omp parallel for schedule(guided) ordered"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_guided_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided_chunked (j, \"guided chunked\");\n }\n } #pragma omp parallel for schedule(guided, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(auto) ordered", "context_chars": 100, "text": "ered Schedule auto\\n#################################################\\n\");\n cnt_auto = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_auto (j, \"auto\");\n }\n } #pragma omp parallel for schedule(auto) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) ordered", "context_chars": 100, "text": "chedule runtime\\n#################################################\\n\");\n cnt_runtime = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_runtime (j, \"runtime\");\n }\n } #pragma omp parallel for schedule(runtime) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) ordered", "context_chars": 100, "text": " Schedule static\\n#################################################\\n\");\n cnt_static = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static (j, \"static\");\n }\n } #pragma omp parallel for schedule(static) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_static_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_static_chunked (j, \"static chunked\");\n }\n } #pragma omp parallel for schedule(static, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) ordered", "context_chars": 100, "text": "chedule dynamic\\n#################################################\\n\");\n cnt_dynamic = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic (j, \"dynamic\");\n }\n } #pragma omp parallel for schedule(dynamic) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, cs) ordered", "context_chars": 100, "text": "%d)\\n#################################################\\n\", cs);\n cnt_dynamic_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_dynamic_chunked (j, \"dynamic chunked\");\n }\n } #pragma omp parallel for schedule(dynamic, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", 
"omp_pragma_line": "#pragma omp parallel for schedule(guided) ordered", "context_chars": 100, "text": " Schedule guided\\n#################################################\\n\");\n cnt_guided = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided (j, \"guided\");\n }\n } #pragma omp parallel for schedule(guided) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided, cs) ordered", "context_chars": 100, "text": "(%d)\\n#################################################\\n\", cs);\n cnt_guided_chunked = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_guided_chunked (j, \"guided chunked\");\n }\n } #pragma omp parallel for schedule(guided, cs) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(auto) ordered", "context_chars": 100, "text": "ered Schedule auto\\n#################################################\\n\");\n cnt_auto = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_auto (j, \"auto\");\n }\n } #pragma omp parallel for schedule(auto) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) ordered", "context_chars": 100, "text": "chedule runtime\\n#################################################\\n\");\n cnt_runtime = init;\n for (j = init; j < size; j++)\n {\n #pragma omp ordered\n {\n check_runtime (j, \"runtime\");\n }\n } #pragma omp parallel for schedule(runtime) ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "; i++) {\n bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS);\n }\n\n for( i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,k1) schedule(static,1)", "context_chars": 100, "text": "ETS\n\n /* Buckets are already sorted. Sorting keys within each bucket */\n#ifdef SCHED_CYCLIC\n #else\n #pragma omp parallel for private(i,j,k,k1) schedule(dynamic)\n\n for( j=0; j< NUM_BUCKETS; j++ ) {\n\n k1 = (j > 0)? bucket_ptrs[j-1] : 0;\n for ( i = k1; i < bucket_ptrs[j]; i++ ) {\n k = --key_buff_ptr_global[key_buff2[i]];\n key_array[k] = key_buff2[i];\n }\n } #pragma omp parallel for private(i,j,k,k1) schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)", "context_chars": 100, "text": " */\n#ifdef SCHED_CYCLIC\n #pragma omp parallel for private(i,j,k,k1) schedule(static,1)\n#else\n for( j=0; j< NUM_BUCKETS; j++ ) {\n\n k1 = (j > 0)? 
bucket_ptrs[j-1] : 0;\n for ( i = k1; i < bucket_ptrs[j]; i++ ) {\n k = --key_buff_ptr_global[key_buff2[i]];\n key_array[k] = key_buff2[i];\n }\n } #pragma omp parallel for private(i,j,k,k1) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:j)", "context_chars": 100, "text": "endif\n\n\n/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */\n\n j = 0;\n for( i=1; i key_array[i] )\n j++;\n\n if( j != 0 )\n printf( \"Full_verify: number of keys out of sort: %ld\\n\", (long)j );\n else\n passed_verification++;\n\n}\n\n\n\n\n/*****************************************************************/\n/************* R A N K ****************/\n/*****************************************************************/\n\n\nvoid rank( int iteration )\n{\n\n INT_TYPE i, k;\n INT_TYPE *key_buff_ptr, *key_buff_ptr2;\n\n#ifdef USE_BUCKETS\n int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;\n INT_TYPE num_bucket_keys = (1L << shift);\n\n\n\n key_array[iteration] = iteration;\n key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;\n\n\n/* Determine where the partial verify test keys are, load into */\n/* top of array bucket_size */\n for( i=0; i> shift]++;\n\n/* Accumulative bucket sizes are the bucket pointers.\n These are global sizes accumulated upon to each bucket */\n bucket_ptrs[0] = 0;\n for( k=0; k< myid; k++ ) \n bucket_ptrs[0] += bucket_size[k][0];\n\n for( i=1; i< NUM_BUCKETS; i++ ) { \n bucket_ptrs[i] = bucket_ptrs[i-1];\n for( k=0; k< myid; k++ )\n bucket_ptrs[i] += bucket_size[k][i];\n for( k=myid; k< num_procs; k++ )\n bucket_ptrs[i] += bucket_size[k][i-1];\n }\n\n\n/* Sort into appropriate bucket */\n #pragma omp for schedule(static)\n for( i=0; i> shift]++] = k;\n }\n\n/* The bucket pointers now point to the final accumulated sizes */\n if (myid < num_procs-1) {\n for( i=0; i< NUM_BUCKETS; i++ )\n for( k=myid+1; k< num_procs; k++ )\n bucket_ptrs[i] += bucket_size[k][i];\n }\n\n\n/* Now, buckets are sorted. We only need to sort keys inside\n each bucket, which can be done in parallel. Because the distribution\n of the number of keys in the buckets is Gaussian, the use of\n a dynamic schedule should improve load balance, thus, performance */\n\n#ifdef SCHED_CYCLIC\n #pragma omp for schedule(static,1)\n#else\n #pragma omp for schedule(dynamic)\n\n for( i=0; i< NUM_BUCKETS; i++ ) {\n\n/* Clear the work array section associated with each bucket */\n k1 = i * num_bucket_keys;\n k2 = k1 + num_bucket_keys;\n for ( k = k1; k < k2; k++ )\n key_buff_ptr[k] = 0;\n\n/* Ranking of all keys occurs in this section: */\n\n/* In this section, the keys themselves are used as their \n own indexes to determine how many of each there are: their\n individual population */\n m = (i > 0)? 
bucket_ptrs[i-1] : 0;\n for ( k = m; k < bucket_ptrs[i]; k++ )\n key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */\n /* population */\n\n/* To obtain ranks of each key, successively add the individual key\n population, not forgetting to add m, the total of lesser keys,\n to the first key population */\n key_buff_ptr[k1] += m;\n for ( k = k1+1; k < k2; k++ )\n key_buff_ptr[k] += key_buff_ptr[k-1];\n\n }\n\n#else /*USE_BUCKETS*/\n\n\n work_buff = key_buff1_aptr[myid];\n\n\n/* Clear the work array */\n for( i=0; i #pragma omp parallel for reduction(+:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/Threads/tbb/src/test/test_openmp.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " int start = ifor( int j=start; j(0,m+n-1,10), OuterBody( c, a, m, b, n ) );\n} #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c", "omp_pragma_line": "#pragma omp parallel for shared (A, b, mat_size, A_aug)", "context_chars": 100, "text": "ouble A_aug[3][4] = {0,0,0,0,0,0,0,0,0,0,0,0};\n double x[3];\n\n // Define Augmented matrix\n for (int i = 0; i #pragma omp parallel for shared (A, b, mat_size, A_aug)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c", "omp_pragma_line": "#pragma omp parallel for shared (A_aug, mat_size)", "context_chars": 100, "text": "\n\n // Developing the augmented matrix\n for (int col = 0; col < mat_size; col++)\n {\n for (int row = col+1; row < mat_size; row++)\n {\n double alp = A_aug[row][col] / A_aug[col][col];\n\n for (int k = 0; k < mat_size+1; k++)\n {\n A_aug[row][k] = A_aug[row][k] - alp*A_aug[col][k];\n }\n\n } #pragma omp parallel for shared (A_aug, mat_size)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c", "omp_pragma_line": "#pragma omp parallel for shared(A_aug, x, mat_size) reduction(+:sum)", "context_chars": 100, "text": "olution vector\n for (int row = mat_size-2; row>= 0; row--)\n {\n double sum = 0;\n for (int col = row; col < mat_size; col++)\n {\n sum = sum + A_aug[row][col]*x[col];\n } #pragma omp parallel for shared(A_aug, x, mat_size) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/8/TDMA/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(A,b,mat_size) shared (x)", "context_chars": 100, "text": " b[i] = b[i] - m*b[i-1];\n }\n\n x[mat_size-1] = b[mat_size-1]/A[mat_size-1][mat_size-1];\n for (int i = mat_size-2; i >= 0; i--)\n {\n x[i] = (b[i] - A[i][i+1]*x[i+1]) / A[i][i];\n } #pragma omp parallel for firstprivate(A,b,mat_size) shared (x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/9/Bisection_Method/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:count) lastprivate(c) firstprivate (a,b, n_iters, tol)", "context_chars": 100, "text": ";\n double tol = 0.01; // Tolerance limit of the approximate solution to the exact solution\n\n for (int i = 0; i < n_iters; i++)\n {\n if (func_x(a) * func_x(b) < 0) // Condition for a and b to be on the opposite side of the root\n {\n c = (a+b)/2.0; // 
Midpoint\n if (func_x(c) * func_x(a) < 0)\n {\n b = c; // b and c are on the same side of the root\n count = count+1; // increment counter\n }\n else\n {\n a = c; // a and c are on the same side of the root\n count = count + 1; // increment counter\n }\n }\n\n if (func_x(c) == 0 || (b-a)/2.0 < tol) // Conditions for accepting the solution\n {\n #pragma omp cancel for // acts like a break command, but increases number of iterations significantly\n }\n } #pragma omp parallel for reduction(+:count) lastprivate(c) firstprivate (a,b, n_iters, tol)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/1/MacLaurin/main.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:sum)", "context_chars": 100, "text": "num_threads;\n\n // Parallel implementation\n\n double start_parallel_time = omp_get_wtime();\n for (int i = 0; i < n_iters; i++)\n {\n num_threads = omp_get_num_threads();\n sum = sum + pow(a,i)/factorial(i);\n } #pragma omp parallel for reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/4/Differentiation_Single_Variable/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(h) shared(fwd_x, bwd_x, cd_x)", "context_chars": 100, "text": "erence, Backward difference and Central difference\n double par_start_time = omp_get_wtime();\n for (int i = 0; i < num_points; i++)\n {\n num_threads = omp_get_num_threads();\n\n double a = i + 2;\n fwd_x[i] = (func_x(a+h) - func_x(a))/h;\n bwd_x[i] = (func_x(a) - func_x(a-h))/h;\n cd_x[i] = (func_x(a+h) - func_x(a-h))/(2*h);\n } #pragma omp parallel for firstprivate(h) shared(fwd_x, bwd_x, cd_x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/7/Gauss_Siedel/main.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:sum) firstprivate (row, A)", "context_chars": 100, "text": "\n for (int row = 0; row < mat_size; row++)\n {\n double sum = 0;\n for (int col = 0; col < row-1; col++)\n {\n num_threads = omp_get_num_threads();\n if (col != row)\n {\n sum = sum + A[row][col]*x[col];\n }\n } #pragma omp parallel for reduction (+:sum) firstprivate (row, A)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/10/Newton_Raphson_single_variable/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:counter) lastprivate(x_new) firstprivate(x, n_iters, tol)", "context_chars": 100, "text": "100; // Number of iterations\n int counter = 0;\n double tol = 0.001; // Tolerance limit\n\n for (int i = 0; i < n_iters; i++)\n {\n x_new = x - (func_x(x) / diff_x(x));\n\n if (fabs(x_new - x) < tol)\n {\n #pragma omp cancel for // Stop iteration when tolerance reached - number of iterations grow significantly\n }\n\n x = x_new; // Substitute old value with newer one for next iteration\n counter = counter+1; // Number of iterations counter\n } #pragma omp parallel for reduction(+:counter) lastprivate(x_new) firstprivate(x, n_iters, tol)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/2/Taylor_Series/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "hread_count;\n\n // Parallel implementation\n\n double start_parallel_time = omp_get_wtime();\n for (int i = 0; i < 3; i++)\n {\n thread_count = 
omp_get_num_threads();\n sum = sum + ((pow(h,i))/factorial(i))*exp(a);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/5/Numerical_Integration/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:integral_mult) firstprivate(num_points, h)", "context_chars": 100, "text": " double integral_mult = 0;\n int thread_num;\n\n double par_start_time = omp_get_wtime();\n for (int i = 0; i < num_points-1; i++)\n {\n thread_num = omp_get_num_threads();\n integral_mult = integral_mult + ( (h/2.0) * (func_x(a + i*h) + func_x(a+(i+1)*h)) );\n } #pragma omp parallel for reduction(+:integral_mult) firstprivate(num_points, h)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/13/Linear_Regression/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x_sum, y_sum) firstprivate(x,y)", "context_chars": 100, "text": "len_arr = sizeof(x)/sizeof(x[0]);\n double x_sum = 0;\n double y_sum = 0;\n double a, b;\n\n for (int i = 0; i < len_arr; i++)\n {\n x_sum = x_sum + x[i];\n y_sum = y_sum + y[i];\n } #pragma omp parallel for reduction(+:x_sum, y_sum) firstprivate(x,y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/13/Linear_Regression/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:temp1, temp2) firstprivate(x_avg, y_avg, x, y)", "context_chars": 100, "text": "= x_sum/len_arr;\n double y_avg = y_sum/len_arr;\n\n double temp1 = 0;\n double temp2 = 0;\n for (int i = 0; i < len_arr; i++)\n {\n temp1 = temp1 + (x[i] - x_avg)*(y[i] - y_avg);\n temp2 = temp2 + (x[i] - x_avg)*(x[i] - x_avg);\n } #pragma omp parallel for reduction(+:temp1, temp2) firstprivate(x_avg, y_avg, x, y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D NonLinear Convection/2D_NonLinear_Convection/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oints], u_new[y_points][x_points];\n double v[y_points][x_points], v_new[y_points][x_points];\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 1.0;\n v[i][j] = 1.0;\n u_new[i][j] = 1.0;\n v_new[i][j] = 1.0;\n\n if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){\n u[i][j] = 2.0;\n v[i][j] = 2.0;\n u_new[i][j] = 2.0;\n v_new[i][j] = 2.0;\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Burgers Equation/2-D_Burgers_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oints], u_new[y_points][x_points];\n double v[y_points][x_points], v_new[y_points][x_points];\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 1.0;\n v[i][j] = 1.0;\n u_new[i][j] = 1.0;\n v_new[i][j] = 1.0;\n\n if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){\n u[i][j] = 2.0;\n v[i][j] = 2.0;\n u_new[i][j] = 2.0;\n v_new[i][j] = 2.0;\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Channel Flow/2-D_Channel_Flow/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s];\n double u_new[y_points][x_points], 
v_new[y_points][x_points], p_new[y_points][x_points];\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 0.0;\n v[i][j] = 0.0;\n p[i][j] = 0.0;\n u_new[i][j] = 0.0;\n v_new[i][j] = 0.0;\n p_new[i][j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D NonLinear Convection/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(del_x)", "context_chars": 100, "text": " consider\n double del_x = x_len/(x_points-1); // Length of an element\n double x[x_points];\n\n for (int i = 0; i < x_points; i++){\n x[i] = i * del_x; // x co-ordinates\n } #pragma omp parallel for firstprivate(del_x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D NonLinear Convection/main.c", "omp_pragma_line": "#pragma omp parallel for shared(x)", "context_chars": 100, "text": " // Velocity at current time\n double u_new[x_points]; // Velocity at next time interval\n for (int i = 0; i < x_points; i++){\n if (x[i] > 0.5 && x[i] < 1.0){\n u[i] = 2.0;\n u_new[i] = 2.0;\n }\n else{\n u[i] = 1.0;\n u_new[i] = 1.0;\n }\n } #pragma omp parallel for shared(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Diffusion/2-D_Diffusion/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oints], u_new[y_points][x_points];\n double v[y_points][x_points], v_new[y_points][x_points];\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 1.0;\n v[i][j] = 1.0;\n u_new[i][j] = 1.0;\n v_new[i][j] = 1.0;\n\n if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){\n u[i][j] = 2.0;\n v[i][j] = 2.0;\n u_new[i][j] = 2.0;\n v_new[i][j] = 2.0;\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Linear Convection/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "to consider\n float del_x = x_len/(x_points-1); // Length of an element\n float x[x_points];\n\n for (int i = 0; i < x_points; i++){\n x[i] = i * del_x; // x co-ordinates\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Poissons Equation/2-D_Poissons_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points], p_new[y_points][x_points];\n double b[y_points][x_points]; // source term\n\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n p[i][j] = 0.0;\n p_new[i][j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Poissons Equation/2-D_Poissons_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "j] = 0.0;\n }\n }\n\n // Initialize source term - add spikes at 1/4th and 3/4th length\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n b[i][j] = 0.0;\n if(i == abs(0.25*x_points) && j == abs(0.25*y_points)){\n b[i][j] = 100;\n }\n if(i == abs(0.75*x_points) && j == abs(0.75*y_points)){\n b[i][j] = -100;\n }\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Diffusion/1-D_Diffusion/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " // CFL criteria\n\n double x[x_points];\n double u[x_points];\n double u_new[x_points];\n for (int i = 0; i < x_points; i++){\n x[i] = i * del_x; // Co-ordinates of the grid points\n\n if (x[i] > 0.5 && x[i] < 1.0){ // Applying I.C.s for velocity values\n u[i] = 2.0;\n u_new[i] = 2.0;\n }\n else{\n u[i] = 1.0;\n u_new[i] = 1.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Laplace Equation/2-D_Laplace_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ints];\n double l1norm = 1.0;\n double l1norm_limit = 0.0001;\n double sum_num, sum_den;\n\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n p[i][j] = 0.0;\n p_new[i][j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Linear Convection/2-D_Linear_Convection/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n// }\n\n double u[y_points][x_points];\n double u_new[y_points][x_points];\n\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 1.0;\n u_new[i][j] = 1.0;\n if(x[i] > 0.5 && x[i] < 1.0 && y[i] > 0.5 && y[i] < 1.0){\n u[i][j] = 2.0;\n u_new[i][j] = 2.0;\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Cavity Flow/2-D_Cavity_Flow/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s];\n double u_new[y_points][x_points], v_new[y_points][x_points], p_new[y_points][x_points];\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n u[i][j] = 0.0;\n v[i][j] = 0.0;\n p[i][j] = 0.0;\n u_new[i][j] = 0.0;\n v_new[i][j] = 0.0;\n p_new[i][j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/Dynamic_Memory_Allocation/2-D_NonLinear_Convection/2-D_Nonlinear_Convection_Dynamic/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oints*x_points*sizeof(double));\n v_new = (double *)malloc(y_points*x_points*sizeof(double));\n for(int i = 0; i < y_points; i++){\n for(int j = 0; j < x_points; j++){\n *(u+i*x_points+j) = 1.0;\n *(v+i*x_points+j) = 1.0;\n *(u_new+i*x_points+j) = 1.0;\n *(v_new+i*x_points+j) = 1.0;\n\n if(*(x+j) > 0.5 && *(x+j) < 1.0 && *(y+i) > 0.5 && *(y+i) < 1.0){\n *(u+i*x_points+j) = 2.0;\n *(v+i*x_points+j) = 2.0;\n *(u_new+i*x_points+j) = 2.0;\n *(v_new+i*x_points+j) = 2.0;\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/Dynamic_Memory_Allocation/1-D_Linear_Convection/1-D_Linear_Convection_Dynamic/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "end_time - ser_start_time);\n\n\n // Parallel execution\n\n // Defining the initial conditions\n for(int i = 0; i < x_points; i++){\n *(u+i) = 1.0;\n *(u_new+i) = 1.0;\n if(*(x+i) > 0.5 && *(x+i) < 1.0){\n *(u+i) = 2.0;\n *(u_new+i) = 2.0;\n }\n 
} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Burgers Equation/1D_Burgers_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * pi;\n int x_points = 1001;\n double del_x = x_len/(x_points-1);\n double x[x_points];\n\n for (int i = 0; i < x_points; i++){\n x[i] = i * del_x;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Burgers Equation/1D_Burgers_Equation/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "double del_t = nu * del_x;\n\n double u[x_points], u_new[x_points];\n\n // Initial value of u\n for (int i = 0; i < x_points; i++){\n u[i] = - 2.0 * nu * ( - (2.0 * x[i]) * exp( - (x[i] * x[i]) / (4.0 * nu)) / (4.0 * nu) - (2.0 * x[i] - 4.0 * pi) * exp( - (x[i] - 2.0 * pi) * (x[i] - 2.0 * pi) / (4.0 * nu)) / (4.0 * nu)) / (exp( - (x[i] - 2.0 * pi) * (x[i] - 2.0 * pi) / (4.0 * nu)) + exp( - (x[i] * x[i]) / (4.0 * nu))) + 4.0;\n u_new[i] = u[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/6/Fibonacci/main.c", "omp_pragma_line": "#pragma omp parallel for private(val)", "context_chars": 100, "text": " val = fibonnaci(n-1) + fibonnaci(n-2);\n return val;\n }\n}\n\nint main()\n{\n int val;\n for (int i = 0; i <= 10; i++)\n {\n val = fibonnaci(i);\n printf(\"Fibonacci of %d th term is: %d\\n\", i, val);\n } #pragma omp parallel for private(val)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/4/Loop/main.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "ble len_ele = 1.0/(num_ele-1);\n double sum = 0.0;\n\n double start_time = omp_get_wtick();\n\n for (int i = 0; i < num_ele; i++)\n {\n double x = i*len_ele;\n sum = sum + 4.0/(1+(x*x));\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/5/Debug_Mandelbrot/main.c", "omp_pragma_line": "#pragma omp parallel for default(shared) shared(c) firstprivate(eps)", "context_chars": 100, "text": "ntains the Mandelbrot set,\n// testing each point to see whether it is inside or outside the set.\n\nfor (int i=0; i #pragma omp parallel for default(shared) shared(c) firstprivate(eps)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "tializeGrid( LBM_Grid grid ) {\n\tSWEEP_VAR\n\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nSWEEP_START( 0, 0, -2, 0, 0, SIZE_Z+2 )\n\t\tLOCAL( grid, C ) = DFL1;\n\t\tLOCAL( grid, N ) = DFL2;\n\t\tLOCAL( grid, S ) = DFL2;\n\t\tLOCAL( grid, E ) = DFL2;\n\t\tLOCAL( grid, W ) = DFL2;\n\t\tLOCAL( grid, T ) = DFL2;\n\t\tLOCAL( grid, B ) = DFL2;\n\t\tLOCAL( grid, NE ) = DFL3;\n\t\tLOCAL( grid, NW ) = DFL3;\n\t\tLOCAL( grid, SE ) = DFL3;\n\t\tLOCAL( grid, SW ) = DFL3;\n\t\tLOCAL( grid, NT ) = DFL3;\n\t\tLOCAL( grid, NB ) = DFL3;\n\t\tLOCAL( grid, ST ) = DFL3;\n\t\tLOCAL( grid, SB ) = DFL3;\n\t\tLOCAL( grid, ET ) = DFL3;\n\t\tLOCAL( grid, EB ) = DFL3;\n\t\tLOCAL( grid, WT ) = DFL3;\n\t\tLOCAL( grid, WB ) = DFL3;\n\n\t\tCLEAR_ALL_FLAGS_SWEEP( grid 
);\n\tSWEEP_END\n}\n\n/*############################################################################*/\n\nvoid LBM_swapGrids( LBM_GridPtr* grid1, LBM_GridPtr* grid2 ) {\n\tLBM_GridPtr aux = *grid1;\n\t*grid1 = *grid2;\n\t*grid2 = aux;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for private( x, y )", "context_chars": 100, "text": "ForLDC( LBM_Grid grid ) {\n\tint x, y, z;\n\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nfor( z = -2; z < SIZE_Z+2; z++ ) {\n\t\tfor( y = 0; y < SIZE_Y; y++ ) {\n\t\t\tfor( x = 0; x < SIZE_X; x++ ) {\n\t\t\t\tif( x == 0 || x == SIZE_X-1 ||\n\t\t\t\t y == 0 || y == SIZE_Y-1 ||\n\t\t\t\t z == 0 || z == SIZE_Z-1 ) {\n\t\t\t\t\tSET_FLAG( grid, x, y, z, OBSTACLE );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tif( (z == 1 || z == SIZE_Z-2) &&\n\t\t\t\t\t x > 1 && x < SIZE_X-2 &&\n\t\t\t\t\t y > 1 && y < SIZE_Y-2 ) {\n\t\t\t\t\t\tSET_FLAG( grid, x, y, z, ACCEL );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for private( x, y )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for private( x, y )", "context_chars": 100, "text": "hannel( LBM_Grid grid ) {\n\tint x, y, z;\n\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nfor( z = -2; z < SIZE_Z+2; z++ ) {\n\t\tfor( y = 0; y < SIZE_Y; y++ ) {\n\t\t\tfor( x = 0; x < SIZE_X; x++ ) {\n\t\t\t\tif( x == 0 || x == SIZE_X-1 ||\n\t\t\t\t y == 0 || y == SIZE_Y-1 ) {\n\t\t\t\t\tSET_FLAG( grid, x, y, z, OBSTACLE );\n\n\t\t\t\t\tif( (z == 0 || z == SIZE_Z-1) &&\n\t\t\t\t\t ! TEST_FLAG( grid, x, y, z, OBSTACLE ))\n\t\t\t\t\t\tSET_FLAG( grid, x, y, z, IN_OUT_FLOW );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for private( x, y )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for private( ux, uy, uz, u2, rho )", "context_chars": 100, "text": "\n\tSWEEP_VAR\n\n\tdouble ux, uy, uz, u2, rho;\n\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nSWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )\n\t\tif( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) {\n\t\t\tDST_C ( dstGrid ) = SRC_C ( srcGrid );\n\t\t\tDST_S ( dstGrid ) = SRC_N ( srcGrid );\n\t\t\tDST_N ( dstGrid ) = SRC_S ( srcGrid );\n\t\t\tDST_W ( dstGrid ) = SRC_E ( srcGrid );\n\t\t\tDST_E ( dstGrid ) = SRC_W ( srcGrid );\n\t\t\tDST_B ( dstGrid ) = SRC_T ( srcGrid );\n\t\t\tDST_T ( dstGrid ) = SRC_B ( srcGrid );\n\t\t\tDST_SW( dstGrid ) = SRC_NE( srcGrid );\n\t\t\tDST_SE( dstGrid ) = SRC_NW( srcGrid );\n\t\t\tDST_NW( dstGrid ) = SRC_SE( srcGrid );\n\t\t\tDST_NE( dstGrid ) = SRC_SW( srcGrid );\n\t\t\tDST_SB( dstGrid ) = SRC_NT( srcGrid );\n\t\t\tDST_ST( dstGrid ) = SRC_NB( srcGrid );\n\t\t\tDST_NB( dstGrid ) = SRC_ST( srcGrid );\n\t\t\tDST_NT( dstGrid ) = SRC_SB( srcGrid );\n\t\t\tDST_WB( dstGrid ) = SRC_ET( srcGrid );\n\t\t\tDST_WT( dstGrid ) = SRC_EB( srcGrid );\n\t\t\tDST_EB( dstGrid ) = SRC_WT( srcGrid );\n\t\t\tDST_ET( dstGrid ) = SRC_WB( srcGrid );\n\t\t\tcontinue;\n\t\t} #pragma omp parallel for private( ux, uy, uz, u2, rho )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \\", "context_chars": 100, 
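The lbm.c records above parallelize the outermost z loop of a triple-nested boundary sweep and list the inner indices as private because they are declared at function scope. A reduced sketch of the same shape; the grid dimensions and flag values below are illustrative stand-ins for the benchmark's SIZE_X/SIZE_Y/SIZE_Z and flag macros.

#include <stdio.h>

#define NX 32
#define NY 32
#define NZ 32

static unsigned char flags[NZ][NY][NX];      /* zero-initialized */

int main(void)
{
    int x, y, z;

    /* z iterations touch disjoint slices; x and y must be private
     * because, as function-scope variables, they would otherwise be
     * shared between threads. */
#pragma omp parallel for private(x, y)
    for (z = 0; z < NZ; z++)
        for (y = 0; y < NY; y++)
            for (x = 0; x < NX; x++)
                if (x == 0 || x == NX - 1 ||
                    y == 0 || y == NY - 1 ||
                    z == 0 || z == NZ - 1)
                    flags[z][y][x] = 1;      /* mark boundary cells */

    printf("corner flag = %d\n", flags[0][0][0]);
    return 0;
}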
"text": " u2, px, py;\n\tSWEEP_VAR\n\n\t/* inflow */\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nux2, uy2, uz2, rho2, u2, px, py )\n\n\n\tSWEEP_START( 0, 0, 0, 0, 0, 1 )\n\t\trho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WB );\n\t\trho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WB );\n\n\t\trho = 2.0*rho1 - rho2;\n\n\t\tpx = (SWEEP_X / (0.5*(SIZE_X-1))) - 1.0;\n\t\tpy = (SWEEP_Y / (0.5*(SIZE_Y-1))) - 1.0;\n\t\tux = 0.00;\n\t\tuy = 0.00;\n\t\tuz = 0.01 * (1.0-px*px) * (1.0-py*py);\n\n\t\tu2 = 1.5 * (ux*ux + uy*uy + uz*uz);\n\n\t\tLOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2);\n\n\t\tLOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2);\n\t\tLOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2);\n\t\tLOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2);\n\t\tLOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2);\n\t\tLOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2);\n\t\tLOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2);\n\n\t\tLOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WB) = 
DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2);\n\tSWEEP_END\n\n\t/* outflow */\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\n#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \\\n ux2, uy2, uz2, rho2, u2, px, py )\n\n\n\n\tSWEEP_START( 0, 0, SIZE_Z-1, 0, 0, SIZE_Z )\n\t\trho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\t\tux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\t\tuy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB );\n\t\tuz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\n\t\tux1 /= rho1;\n\t\tuy1 /= rho1;\n\t\tuz1 /= rho1;\n\n\t\trho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT )\n\t\t + 
GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\t\tux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\t\tuy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB );\n\t\tuz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\n\t\tux2 /= rho2;\n\t\tuy2 /= rho2;\n\t\tuz2 /= rho2;\n\n\t\trho = 1.0;\n\n\t\tux = 2*ux1 - ux2;\n\t\tuy = 2*uy1 - uy2;\n\t\tuz = 2*uz1 - uz2;\n\n\t\tu2 = 1.5 * (ux*ux + uy*uy + uz*uz);\n\n\t\tLOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2);\n\n\t\tLOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2);\n\t\tLOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2);\n\t\tLOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2);\n\t\tLOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2);\n\t\tLOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2);\n\t\tLOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2);\n\n\t\tLOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2);\n\tSWEEP_END\n}\n\n/*############################################################################*/\n\nvoid LBM_showGridStatistics( LBM_Grid grid ) {\n\tint nObstacleCells = 0,\n\t nAccelCells = 0,\n\t nFluidCells = 0;\n\tdouble ux, uy, uz;\n\tdouble minU2 = 1e+30, maxU2 = -1e+30, u2;\n\tdouble minRho = 1e+30, maxRho = -1e+30, rho;\n\tdouble mass = 0;\n\n\tSWEEP_VAR\n\n\tSWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )\n\t\trho = + LOCAL( grid, C ) + LOCAL( grid, N )\n\t\t + LOCAL( 
grid, S ) + LOCAL( grid, E )\n\t\t + LOCAL( grid, W ) + LOCAL( grid, T )\n\t\t + LOCAL( grid, B ) + LOCAL( grid, NE )\n\t\t + LOCAL( grid, NW ) + LOCAL( grid, SE )\n\t\t + LOCAL( grid, SW ) + LOCAL( grid, NT )\n\t\t + LOCAL( grid, NB ) + LOCAL( grid, ST )\n\t\t + LOCAL( grid, SB ) + LOCAL( grid, ET )\n\t\t + LOCAL( grid, EB ) + LOCAL( grid, WT )\n\t\t + LOCAL( grid, WB );\n\t\tif( rho < minRho ) minRho = rho;\n\t\tif( rho > maxRho ) maxRho = rho;\n\t\tmass += rho;\n\n\t\tif( TEST_FLAG_SWEEP( grid, OBSTACLE )) {\n\t\t\tnObstacleCells++;\n\t\t}\n\t\telse {\n\t\t\tif( TEST_FLAG_SWEEP( grid, ACCEL ))\n\t\t\t\tnAccelCells++;\n\t\t\telse\n\t\t\t\tnFluidCells++;\n\n\t\t\tux = + LOCAL( grid, E ) - LOCAL( grid, W )\n\t\t\t + LOCAL( grid, NE ) - LOCAL( grid, NW )\n\t\t\t + LOCAL( grid, SE ) - LOCAL( grid, SW )\n\t\t\t + LOCAL( grid, ET ) + LOCAL( grid, EB )\n\t\t\t - LOCAL( grid, WT ) - LOCAL( grid, WB );\n\t\t\tuy = + LOCAL( grid, N ) - LOCAL( grid, S )\n\t\t\t + LOCAL( grid, NE ) + LOCAL( grid, NW )\n\t\t\t - LOCAL( grid, SE ) - LOCAL( grid, SW )\n\t\t\t + LOCAL( grid, NT ) + LOCAL( grid, NB )\n\t\t\t - LOCAL( grid, ST ) - LOCAL( grid, SB );\n\t\t\tuz = + LOCAL( grid, T ) - LOCAL( grid, B )\n\t\t\t + LOCAL( grid, NT ) - LOCAL( grid, NB )\n\t\t\t + LOCAL( grid, ST ) - LOCAL( grid, SB )\n\t\t\t + LOCAL( grid, ET ) - LOCAL( grid, EB )\n\t\t\t + LOCAL( grid, WT ) - LOCAL( grid, WB );\n\t\t\tu2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);\n\t\t\tif( u2 < minU2 ) minU2 = u2;\n\t\t\tif( u2 > maxU2 ) maxU2 = u2;\n\t\t}\n\tSWEEP_END\n\n printf( \"LBM_showGridStatistics:\\n\"\n \"\\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\\n\"\n \"\\tminRho: %8.4f maxRho: %8.4f mass: %e\\n\"\n \"\\tminU: %e maxU: %e\\n\\n\",\n nObstacleCells, nAccelCells, nFluidCells,\n minRho, maxRho, mass,\n sqrt( minU2 ), sqrt( maxU2 ) );\n\n} #pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c", "omp_pragma_line": "#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \\", "context_chars": 100, "text": ") + 3.0) - u2);\n\tSWEEP_END\n\n\t/* outflow */\n\t/*voption indep*/\n#if !defined(SPEC_CPU)\n#ifdef _OPENMP\nux2, uy2, uz2, rho2, u2, px, py )\n\n\n\n\tSWEEP_START( 0, 0, SIZE_Z-1, 0, 0, SIZE_Z )\n\t\trho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\t\tux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )\n\t\t + 
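Several omp_pragma_line values in the lbm.c records end with a backslash because the directive is continued on the next source line; after line splicing the private list simply resumes there. A minimal sketch of that continuation syntax, with placeholder variable names that are not the benchmark's.

#include <stdio.h>

int main(void)
{
    double a, b, c, d;
    double out[100];

    /* A #pragma line may be split with '\'; the preprocessor splices
     * the two lines before the directive is parsed, so the clause
     * list continues on the following line. */
#pragma omp parallel for private(a, b, c, \
                                 d)
    for (int i = 0; i < 100; i++) {
        a = i * 0.5;
        b = a + 1.0;
        c = a * b;
        d = c - a;
        out[i] = d;                          /* each iteration writes its own slot */
    }

    printf("out[99] = %g\n", out[99]);
    return 0;
}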
GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\t\tuy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB );\n\t\tuz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );\n\n\t\tux1 /= rho1;\n\t\tuy1 /= rho1;\n\t\tuz1 /= rho1;\n\n\t\trho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\t\tux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\t\tuy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )\n\t\t - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB );\n\t\tuz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )\n\t\t + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );\n\n\t\tux2 /= rho2;\n\t\tuy2 /= rho2;\n\t\tuz2 /= 
rho2;\n\n\t\trho = 1.0;\n\n\t\tux = 2*ux1 - ux2;\n\t\tuy = 2*uy1 - uy2;\n\t\tuz = 2*uz1 - uz2;\n\n\t\tu2 = 1.5 * (ux*ux + uy*uy + uz*uz);\n\n\t\tLOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2);\n\n\t\tLOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2);\n\t\tLOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2);\n\t\tLOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2);\n\t\tLOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2);\n\t\tLOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2);\n\t\tLOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2);\n\n\t\tLOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2);\n\t\tLOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2);\n\tSWEEP_END\n}\n\n/*############################################################################*/\n\nvoid LBM_showGridStatistics( LBM_Grid grid ) {\n\tint nObstacleCells = 0,\n\t nAccelCells = 0,\n\t nFluidCells = 0;\n\tdouble ux, uy, uz;\n\tdouble minU2 = 1e+30, maxU2 = -1e+30, u2;\n\tdouble minRho = 1e+30, maxRho = -1e+30, rho;\n\tdouble mass = 0;\n\n\tSWEEP_VAR\n\n\tSWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )\n\t\trho = + LOCAL( grid, C ) + LOCAL( grid, N )\n\t\t + LOCAL( grid, S ) + LOCAL( grid, E )\n\t\t + LOCAL( grid, W ) + LOCAL( grid, T )\n\t\t + LOCAL( grid, B ) + LOCAL( grid, NE )\n\t\t + LOCAL( grid, NW ) + LOCAL( grid, SE )\n\t\t + LOCAL( grid, SW ) + LOCAL( grid, NT )\n\t\t + LOCAL( grid, NB ) + LOCAL( grid, ST )\n\t\t + LOCAL( grid, SB ) + LOCAL( grid, ET )\n\t\t + LOCAL( grid, EB ) + LOCAL( grid, WT )\n\t\t + LOCAL( grid, WB );\n\t\tif( rho < minRho ) minRho = rho;\n\t\tif( rho > maxRho ) maxRho = rho;\n\t\tmass += rho;\n\n\t\tif( TEST_FLAG_SWEEP( grid, OBSTACLE )) {\n\t\t\tnObstacleCells++;\n\t\t}\n\t\telse {\n\t\t\tif( TEST_FLAG_SWEEP( grid, ACCEL ))\n\t\t\t\tnAccelCells++;\n\t\t\telse\n\t\t\t\tnFluidCells++;\n\n\t\t\tux = + LOCAL( grid, E ) - LOCAL( grid, W )\n\t\t\t + LOCAL( grid, NE ) - LOCAL( grid, NW )\n\t\t\t + LOCAL( grid, SE ) - LOCAL( grid, SW )\n\t\t\t + LOCAL( grid, ET ) + LOCAL( grid, EB )\n\t\t\t - LOCAL( grid, WT ) - LOCAL( grid, WB );\n\t\t\tuy = + LOCAL( grid, N ) - LOCAL( grid, S )\n\t\t\t + LOCAL( grid, NE ) + LOCAL( grid, NW )\n\t\t\t - LOCAL( grid, SE ) - LOCAL( grid, SW )\n\t\t\t + LOCAL( grid, NT ) + LOCAL( grid, NB )\n\t\t\t - LOCAL( grid, ST ) - LOCAL( grid, SB );\n\t\t\tuz = + LOCAL( grid, T ) - LOCAL( grid, B )\n\t\t\t + LOCAL( grid, NT ) - LOCAL( grid, NB )\n\t\t\t + LOCAL( grid, ST ) - LOCAL( grid, SB )\n\t\t\t + LOCAL( grid, ET ) - LOCAL( grid, EB )\n\t\t\t + LOCAL( grid, WT ) - LOCAL( grid, WB );\n\t\t\tu2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);\n\t\t\tif( u2 < minU2 ) minU2 = u2;\n\t\t\tif( u2 > maxU2 ) maxU2 = 
u2;\n\t\t}\n\tSWEEP_END\n\n printf( \"LBM_showGridStatistics:\\n\"\n \"\\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\\n\"\n \"\\tminRho: %8.4f maxRho: %8.4f mass: %e\\n\"\n \"\\tminU: %e maxU: %e\\n\\n\",\n nObstacleCells, nAccelCells, nFluidCells,\n minRho, maxRho, mass,\n sqrt( minU2 ), sqrt( maxU2 ) );\n\n} #pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Instrument/test.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "de \n#include \n\n\nvoid fct1(int k,int l[])\n{\n\n int i;\n int p; printf(\"Start\\n\");\n\nfor(i=0;i<4;i++)\n { \n printf(\"LOOPA%d\\n\",i);\n p+=i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Instrument/test.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "(\"LOOPA%d\\n\",i);\n p+=i;\n }\n\n\n for(i=0;i<4;i++)\n { \n printf(\"LOOPB%d\\n\",i);\n p+=i;\n }\n\nfor(i=0;i<4;i++)\n { \n p+=i;printf(\"LOOPC%d thread__%d\\n\",i,omp_get_thread_num());\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Capture_Replay/test.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "de \n#include \n\n\nvoid fct1(int k,int l[])\n{\n\n int i;\n int p; printf(\"Start\\n\");\n\nfor(i=0;i<4;i++)\n { \n printf(\"LOOPA%d\\n\",i);\n p+=i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Capture_Replay/test.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "(\"LOOPA%d\\n\",i);\n p+=i;\n }\n\n\n for(i=0;i<4;i++)\n { \n printf(\"LOOPB%d\\n\",i);\n p+=i;\n }\n\nfor(i=0;i<4;i++)\n { \n p+=i;printf(\"LOOPC%d thread__%d\\n\",i,omp_get_thread_num());\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Blackscholes/blackscholes.m4.cpp", "omp_pragma_line": "#pragma omp parallel for private(i, price, priceDelta)", "context_chars": 100, "text": " int end = start + (numOptions / nThreads);\n\n for (j=0; jfor (i=0; i= 1e-4 ){\n printf(\"Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\\n\",\n i, price, data[i].DGrefval, priceDelta);\n numError ++;\n }\n\n }\n } #pragma omp parallel for private(i, price, priceDelta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/allKnown.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "gma omp section\n\t\t{\n\t\t\ttestThisNonLeaf:\n#pragma omp critical\n\t\t\t{\n\t\t\t\tx = x + 6;\n\t\t\t}\n\t\t}\n\t}\n\tim51:\nfor (iter = 0; iter < 8; iter++) {\n\t\tint x1;\n\t\tint y1;\n\t\tx1 += my(8);\n\t\tfoo(x1, y1, 1);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/cfgTests/parallelForConstruct.c", "omp_pragma_line": "#pragma omp parallel for private(a)", "context_chars": 100, "text": "int main() {\n\tint x = 10;\n\tint a;\nfor(x = 0; x < 10; x++)\n\t{\n\t\ta -= 10;\n\t} #pragma omp parallel for private(a)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/cfgTests/allCFG.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "a omp section\n\t\t{\n\t\t\ttestThisNonLeaf:;\n#pragma omp critical\n\t\t\t{\n\t\t\t\tx = x + 6;\n\t\t\t}\n\t\t}\n\t}\n\tim51:;\nfor (iter = 0; iter < 8; iter++) {\n\t\tint x1;\n\t\tint y1;\n\t\tx1 += my(8);\n\t\tfoo(x1, y1, 1);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/simplification/test5.c", "omp_pragma_line": "#pragma omp parallel for ordered default(shared) private(i)", "context_chars": 100, "text": "int main () {\n\tint i;\n\tint j = 10;\nfor(i = 0; i < j; i++) {\n#pragma omp atomic update\n\t\ti = i + 1;\n\t} #pragma omp parallel for ordered default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/enforcer.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ns\n\t{\n#pragma omp section\n\t\tx++;\n#pragma omp section\n\t\tx+=2;\n\t}\n#pragma omp single\n#pragma omp task\nfor (x = 0; x < 12; x++)\n\t\tx = x + 0;\n#pragma omp parallel sections\n\t{\n#pragma omp section\n\t\tx++;\n#pragma omp section\n\t\tx+=2;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/final-preproc/sp-b.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "\n }\n }\n}\nstatic void ninvr(void) {\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n \nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r3;\n t2 = 0.5 * ( r4 + r5 );\n rhs[0][i][j][k] = -r2;\n rhs[1][i][j][k] = r1;\n rhs[2][i][j][k] = bt * ( r4 - r5 );\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/final-preproc/sp-b.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", 
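The sp-b.c records here (ninvr/pinvr from the NAS SP benchmark) parallelize the outer i loop while keeping the solution arrays shared via default(shared) and listing every loop index and per-iteration scalar as private. A reduced sketch of that clause layout; the array extent and the trivial update below are invented for illustration.

#include <stdio.h>

#define N 16

static double rhs[2][N][N][N];               /* zero-initialized */

int main(void)
{
    int i, j, k;
    double r1, r2;

    /* The arrays stay shared; each thread gets its own copies of the
     * indices and scalar temporaries, so the outer iterations are
     * independent. */
#pragma omp parallel for default(shared) private(i, j, k, r1, r2)
    for (i = 1; i < N - 1; i++) {
        for (j = 1; j < N - 1; j++) {
            for (k = 1; k < N - 1; k++) {
                r1 = rhs[0][i][j][k];
                r2 = rhs[1][i][j][k];
                rhs[0][i][j][k] = -r2;       /* toy stand-in for the SP update */
                rhs[1][i][j][k] = r1;
            }
        }
    }

    printf("rhs[0][1][1][1] = %g\n", rhs[0][1][1][1]);
    return 0;
}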
"context_chars": 100, "text": "\n }\n }\n}\nstatic void pinvr(void) {\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n \nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r1;\n t2 = 0.5 * ( r4 + r5 );\n rhs[0][i][j][k] = bt * ( r4 - r5 );\n rhs[1][i][j][k] = -r3;\n rhs[2][i][j][k] = r2;\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "*\n * @g: the graph\n */\nvoid initialize_graph(graph* g) {\n DEBUG(\"initializing the graph\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = malloc(sizeof(payload));\n u->data = u_data;\n\n u_data->fragment_id = u->label;\n u_data->tmp_fragment_id = u->label;\n u_data->received_first_message = 0;\n u_data->b = NULL;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "ueuelist* msgs, queuelist* tmp_msgs, queuelist* blues) {\n DEBUG(\"planting root messages\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n u_data->received_first_message = 0;\n\n /* Only roots find the blue edge */\n if (u_data->fragment_id != u->label)\n continue;\n\n message m = {-1};\n enqueue(msgs, u->label, &m);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " while (nodes_yet_to_recv) {\n DEBUG(\"propagating the messages across the graph\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n if (u_data->received_first_message)\n continue;\n\n while (!is_ql_queue_empty(msgs, u->label)) {\n u_data->received_first_message = 1;\n message* m = dequeue(msgs, u->label);\n\n for (int j = 0; j < u->degree; j++) {\n node* v = *((node**) elem_at(&u->neighbors, j));\n payload* v_data = v->data;\n\n /* Don't send the message back to the source */\n if (v->label == m->from)\n continue;\n\n /**\n * If the neighbor is outside the fragment it's a potential\n * blue edge. 
Otherwise it's just a carrier for this message.\n */\n if (v_data->fragment_id != u_data->fragment_id) {\n edge b = {u->label, v->label, g->adj_mat[u->label][v->label]};\n enqueue(blues, u_data->fragment_id, &b);\n }\n else {\n message mx = {u->label};\n enqueue(tmp_msgs, v->label, &mx);\n }\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " }\n }\n }\n\n DEBUG(\"moving messages from tmp_msgs to msgs\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n while (!is_ql_queue_empty(tmp_msgs, u->label)) {\n message* m = dequeue(tmp_msgs, u->label);\n if (!u_data->received_first_message)\n enqueue(msgs, u->label, m);\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "_yet_to_recv = 0;\n\n DEBUG(\"checking if there are any more nodes left to process\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n if (!u_data->received_first_message)\n nodes_yet_to_recv = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " would have happened.\n */\n DEBUG(\"finding the minimum of the accumulated blue edges\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n /* only roots find the blue edge */\n if (u_data->fragment_id != u->label)\n continue;\n\n edge* min_edge = NULL;\n while (!is_ql_queue_empty(blues, u->label)) {\n edge* b = dequeue(blues, u->label);\n if (min_edge == NULL) {\n min_edge = b;\n continue;\n }\n\n /**\n * So this might look like some kind of weird logic, but there's a\n * reason why I'm doing this: say there are two different fragments\n * with blue edges into each other. If the two blue edges are the\n * same, it's perfectly fine -- it'll be resolved in the conflicting\n * blue edges scennario. However, if they're different, they'll\n * both be added to the MST when it should only be one of them. To\n * prevent this, we'll simply use the edge that has a smaller value\n * of (u*N + v). 
Note that both will have the exact same weight,\n * so it's fine whichever one we choose.\n */\n int b_score = b->u*g->N + b->v;\n if (b->u > b->v)\n b_score = b->v*g->N + b->u;\n\n int min_score = min_edge->u*g->N + min_edge->v;\n if (min_edge->u > min_edge->v)\n min_score = min_edge->v*g->N + min_edge->u;\n\n if ((b->w < min_edge->w) || (b->w == min_edge->w && b_score < min_score))\n min_edge = b;\n }\n\n node* future_leader = elem_at(&g->vertices, min_edge->u);\n payload* future_leader_data = future_leader->data;\n\n u_data->b = min_edge;\n future_leader_data->b = min_edge;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "@g: the graph\n */\nvoid assign_tmp_fragments(graph* g) {\n DEBUG(\"setting tmp_fragment_id\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n node* leader = elem_at(&g->vertices, u_data->fragment_id);\n payload* leader_data = leader->data;\n\n u_data->tmp_fragment_id = leader_data->b->u;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "data->tmp_fragment_id = leader_data->b->u;\n }\n\n DEBUG(\"setting temporary fragment_id\\n\");\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n u_data->fragment_id = u_data->tmp_fragment_id;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "r.\n */\n for (int ok = 0; ok < 2; ok++) {\n DEBUG(\"conflicts phase: %d\\n\", ok);\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n if (u_data->fragment_id != u->label)\n continue;\n\n #pragma omp critical\n {\n node* v = elem_at(&g->vertices, u_data->b->v);\n payload* v_data = v->data;\n\n node* v_leader = elem_at(&g->vertices, v_data->fragment_id);\n payload* v_leader_data = v_leader->data;\n\n int conflicting_merges = (u->label == v_leader_data->b->v &&\n v_leader_data->b->u == v->label &&\n u_data->b->v == v->label);\n\n if (conflicting_merges == ok) {\n change_fragment(g, u->label, v_leader->label);\n edge m = {u->label, v->label, g->adj_mat[u->label][v->label]};\n enqueue(mst, 0, &m);\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "r each node.\n *\n * @g: a pointer to the graph object\n */\nvoid generate_random_field(graph* g) {\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (!data->present)\n continue;\n\n data->r = randnum();\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": 
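The mst.c record just above performs the fragment merge inside #pragma omp critical so that only one thread at a time mutates the shared graph and MST queue. A small sketch of that shape, using a toy shared list in place of the graph structures; the selection test and sizes are illustrative. The order of entries in the shared list is nondeterministic; only its final length is fixed.

#include <stdio.h>

int main(void)
{
    int picked[100];
    int n_picked = 0;                        /* shared; updated only inside the critical section */

    /* Most of the loop body is independent per-iteration work; the
     * occasional update of shared state is serialized with a critical
     * section, as in the mst.c conflicts phase. */
#pragma omp parallel for
    for (int i = 0; i < 100; i++) {
        if (i % 7 == 0) {                    /* stand-in for "this node is a fragment root" */
#pragma omp critical
            {
                picked[n_picked] = i;
                n_picked++;
            }
        }
    }

    printf("picked %d items\n", n_picked);
    return 0;
}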
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "entering the MIS.\n *\n * @g: a pointer to the graph object\n */\nvoid decide_mis_entry(graph* g) {\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (!data->present)\n continue;\n \n int enter = 1;\n for (int i = 0; i < cur->degree; i++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, i));\n payload* neighbor_data = neighbor->data;\n\n if (data->r > neighbor_data->r) {\n enter = 0;\n break;\n }\n }\n\n if (enter) {\n data->present = 0;\n data->in_mis = 1;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "ve them.\n *\n * @g: a pointer to the graph object\n */\nvoid remove_mis_adjacent_nodes(graph* g) {\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (data->in_mis) {\n for (int i = 0; i < cur->degree; i++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, i));\n payload* neighbor_data = neighbor->data;\n\n neighbor_data->present = 0;\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "nodes, returns 0 otherwise.\n */\nint do_present_nodes_exist(graph* g) {\n int keep_going = 0;\n\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (data->present)\n keep_going = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "sors, int N) {\n int ret = 0;\n\n DEBUG(\"checking if there are any undecided processors\\n\");\n for (int i = 0; i < N; i++) {\n processor* p = processors+i;\n\n if (p->decided == 0)\n ret = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "st_vote(processor* processors, int N, queuelist* vote_ql) {\n DEBUG(\"broadcasting votes\\n\");\n\n for (int i = 0; i < N; i++) {\n processor* p = processors+i;\n\n for (int j = 0; j < N; j++) {\n if (i == j)\n continue;\n\n vote v = {i, p->vote};\n enqueue(vote_ql, j, &v);\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "a queuelist of votes\n */\nvoid receive_votes(processor* processors, int N, queuelist* vote_ql) {\n for (int i = 0; i < N; i++) {\n processor* p = processors+i;\n\n int yes = 0;\n int no 
= 0;\n while (!is_ql_queue_empty(vote_ql, i)) {\n vote* v = dequeue(vote_ql, i);\n\n if (v->vote)\n yes++;\n else\n no++;\n }\n\n int maj = 1;\n int tally = yes;\n if (no > yes) {\n maj = 0;\n tally = no;\n }\n\n int threshold;\n if (rand() % 2 == 0)\n threshold = L;\n else\n threshold = H;\n\n if (tally > threshold)\n p->vote = maj;\n else\n p->vote = 0;\n\n if (tally >= G) {\n p->decided = 1;\n p->d = maj;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "messages, 0 otherwise.\n */\nint messages_in_queue(int N, queuelist* recv) {\n int result = 0;\n\n for (int i = 0; i < N; i++) {\n if (!is_ql_queue_empty(recv, i))\n result = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "d(graph* g, queuelist* recv, queuelist* send) {\n DEBUG(\"receiving and sending messages\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n int lowest_y = INT_MAX;\n int lowest_from = 0;\n while (!is_ql_queue_empty(recv, i)) {\n message* m = dequeue(recv, i);\n\n if (lowest_y > m->y) {\n lowest_y = m->y;\n lowest_from = m->from;\n }\n }\n\n if (lowest_y != INT_MAX && lowest_y < u_data->distance) {\n u_data->distance = lowest_y;\n\n for (int j = 0; j < u->degree; j++) {\n node* v = *((node**) elem_at(&u->neighbors, j));\n payload* u_data = u->data;\n\n if (v->label == lowest_from)\n continue;\n\n message m = {u->label, lowest_y+1};\n enqueue(send, v->label, &m);\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " recv, queuelist* send) {\n DEBUG(\"propagating messages from the send queuelist to recv\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n\n while (!is_ql_queue_empty(send, u->label))\n enqueue(recv, u->label, dequeue(send, u->label));\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "k and 0\n * if none of the nodes need to colored.\n */\nint again(graph* g) {\n int result = 0;\n\n for (int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n if (u_data->again)\n result = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "ts children.\n *\n * @g: the graph\n */\nvoid parent_to_child(graph* g) {\n DEBUG(\"starting\\n\");\n\n for(int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = 
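Several of the imsuite records above (messages_in_queue in bellman_ford.c, again in vertex_coloring.c, do_present_nodes_exist in mis.c) run a parallel for in which any matching iteration sets a shared flag to 1; every writer stores the same value, which is a common idiom even though it is still formally a race under the OpenMP memory model. The same intent can be expressed with OpenMP's logical-OR reduction, shown below as an alternative sketch rather than a transcription of the imsuite code.

#include <stdio.h>

int main(void)
{
    int present[1000] = {0};
    present[123] = 1;                        /* one element satisfies the test */

    int found = 0;

    /* reduction(||:found) gives each thread a private flag and ORs
     * them together at the end, making the "does any element match"
     * intent explicit and race-free. */
#pragma omp parallel for reduction(|| : found)
    for (int i = 0; i < 1000; i++)
        found = found || present[i];

    printf("found = %d\n", found);
    return 0;
}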
u->data;\n\n if (u->label != ROOT) {\n node* parent = elem_at(&g->vertices, u_data->parent);\n payload* parent_data = parent->data;\n\n u_data->recv = parent_data->color;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "\nvoid six_color_tree(graph *g, int digits) {\n DEBUG(\"starting\\n\");\n\n parent_to_child(g);\n\n for(int i = 0; i < g->N; i++) {\n node* u = elem_at(&g->vertices, i);\n payload* u_data = u->data;\n\n if(u->label == ROOT)\n continue;\n\n u_data->again = 0;\n\n int xor = u_data->recv ^ u_data->color;\n for(int k = 0; k < digits; k++) {\n int mask = 1 << k;\n\n /* If they have this bit different, color */\n if(xor & mask) {\n u_data->color = (k << 1) + (u_data->color & mask ? 1 : 0);\n break;\n }\n }\n\n if(u_data->color >= 6)\n u_data->again = 1;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_dp.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "neighbor's x value.\n *\n * @g: a pointer to the graph\n */\nvoid calculate_temporary_x(graph* g) {\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n processor* p = cur->data;\n\n int new_x = p->x;\n for (int j = 0; j < cur->degree; j++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, j));\n processor* neighbor_p = neighbor->data;\n\n if (new_x < neighbor_p->x)\n new_x = neighbor_p->x;\n }\n\n p->new_x = new_x;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_dp.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "if there was a change.\n */\nint propagate_temporary_x(graph* g) {\n int something_changed = 0;\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n processor* p = cur->data;\n\n if (p->new_x != p->x)\n something_changed = 1;\n\n p->x = p->new_x;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "he graph nodes with the payload data.\n *\n * @g: the graph\n */\nvoid initialize_graph(graph* g) {\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* data = malloc(sizeof(payload));\n\n data->color = WHITE;\n\n data->joined = 0;\n\n initialize_vector(&data->W, sizeof(node*));\n\n int* visited = malloc(g->N * sizeof(int));\n memset(visited, 0, g->N * sizeof(int));\n\n initialize_vector(&data->n2, sizeof(node*));\n for (int j = 0; j < v->degree; j++) {\n node* u = *((node**) elem_at(&v->neighbors, j));\n\n if (visited[u->label])\n continue;\n visited[u->label] = 1;\n\n append_to_vector(&data->n2, &u);\n\n for (int k = 0; k < u->degree; k++) {\n node* w = *((node**) elem_at(&u->neighbors, k));\n if (w == v)\n continue;\n\n if (visited[w->label])\n continue;\n visited[w->label] = 1;\n\n append_to_vector(&data->n2, &w);\n }\n }\n 
append_to_vector(&data->n2, &v);\n\n free(visited);\n\n v->data = data;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "de (WHITE nodes), 0\n * otherwise.\n */\nint unjoined_nodes_exist(graph* g) {\n int result = 0;\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* data = v->data;\n\n if (data->color == WHITE) {\n DEBUG(\"%d->color = WHITE\\n\", v->label);\n result = 1;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "w for each vertex.\n *\n * @g: the graph\n */\nvoid compute_w(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n v_data->W.used = 0;\n\n if (v_data->color == WHITE)\n append_to_vector(&v_data->W, &v);\n\n for (int j = 0; j < v->degree; j++) {\n node* u = *((node**) elem_at(&v->neighbors, j));\n payload* u_data = u->data;\n\n if (u_data->color == WHITE)\n append_to_vector(&v_data->W, &u);\n }\n\n v_data->w = v_data->W.used;\n DEBUG(\"%d->w = %d\\n\", v->label, v_data->w);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "each vertex.\n *\n * @g: the graph\n */\nvoid compute_w_tilde(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n v_data->w_tilde = ceil_power_of_2(v_data->w);\n DEBUG(\"%d->w_tilde = %d\\n\", v->label, v_data->w_tilde);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "r each vertex.\n *\n * @g: the graph\n */\nvoid compute_w_hat(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n int w_hat = 0;\n for (int j = 0; j < v_data->n2.used; j++) {\n node* u = *((node**) elem_at(&v_data->n2, j));\n payload* u_data = u->data;\n\n if (u_data->w_tilde > w_hat)\n w_hat = u_data->w_tilde;\n }\n\n v_data->w_hat = w_hat;\n DEBUG(\"%d->w_hat = %d\\n\", v->label, w_hat);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "ctive or not.\n *\n * @g: the graph\n */\nvoid compute_active(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n if 
(v_data->w_hat == v_data->w_tilde)\n v_data->active = 1;\n else\n v_data->active = 0;\n\n DEBUG(\"%d->active = %d\\n\", v->label, v_data->active);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "ort for each node.\n *\n * @g: the graph\n */\nvoid compute_s(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n int support = v_data->active;\n for (int j = 0; j < v->degree; j++) {\n node* u = *((node**) elem_at(&v->neighbors, j));\n payload* u_data = u->data;\n\n if (u_data->active)\n support++;\n }\n\n DEBUG(\"%d->s = %d\\n\", v->label, support);\n v_data->s = support;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "mized fashion.\n *\n * @g: the graph\n */\nvoid compute_s_hat(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n int s_hat = 0;\n for (int j = 0; j < v_data->W.used; j++) {\n node* u = *((node**) elem_at(&v_data->W, j));\n payload* u_data = u->data;\n\n if (u_data->s > s_hat)\n s_hat = u_data->s;\n }\n\n DEBUG(\"%d->s_hat = %d\\n\", v->label, s_hat);\n v_data->s_hat = s_hat;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "d fashion.\n *\n * @g: the graph\n */\nvoid compute_candidacy(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n v_data->candidate = 0;\n if (v_data->active) {\n int r = rand() % (v_data->s_hat);\n if (r == 0)\n v_data->candidate = 1;\n }\n\n DEBUG(\"%d->candidate = %d\\n\", v->label, v_data->candidate);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "\n * of each node).\n *\n * @g: the graph\n */\nvoid compute_c(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n v_data->c = 0;\n for (int j = 0; j < v_data->W.used; j++) {\n node* u = *((node**) elem_at(&v_data->W, j));\n payload* u_data = u->data;\n \n if (u_data->candidate)\n v_data->c++;\n }\n\n DEBUG(\"%d->c = %d\\n\", v->label, v_data->c);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel 
for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "es the w value.\n *\n * @g: the graph\n */\nvoid compute_join(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->W.used == 0)\n continue;\n\n int sigma_c = 0;\n for (int j = 0; j < v_data->W.used; j++) {\n node* u = *((node**) elem_at(&v_data->W, j));\n payload* u_data = u->data;\n\n sigma_c += u_data->c;\n }\n\n if (v_data->candidate && sigma_c <= 3*v_data->w) {\n DEBUG(\"%d joining\\n\", v->label);\n v_data->color = BLACK;\n v_data->joined = 1;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "nating set or not).\n *\n * @g: the graph\n */\nvoid colorize(graph* g) {\n DEBUG(\"starting\\n\");\n\n for (int i = 0; i < g->N; i++) {\n node* v = elem_at(&g->vertices, i);\n payload* v_data = v->data;\n\n if (v_data->color != WHITE)\n continue;\n\n for (int j = 0; j < v->degree; j++) {\n node* u = *((node**) elem_at(&v->neighbors, j));\n payload* u_data = u->data;\n\n if (u_data->color == BLACK) {\n v_data->color = GRAY;\n break;\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bfs_dijkstra.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "graph object\n */\nvoid initialize_graph(graph* g) {\n // allocate the data field for each node\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n\n payload* data = malloc(sizeof(payload));\n\n data->parent_label = -1;\n data->phase_discovered = -1;\n\n cur->data = data;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bfs_dijkstra.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "e\n * discovered.\n */\nint broadcast_start(graph* g, int p) {\n int nobody_was_discovered = 1;\n\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n // this node was just discovered in phase `p`\n if (data->phase_discovered == p) {\n // we send a \"join p+1\" message to all quiet neighbors\n for (int j = 0; j < cur->degree; j++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, j));\n payload* neighbor_data = neighbor->data;\n\n if (neighbor_data->phase_discovered < 0) {\n neighbor_data->phase_discovered = p+1;\n neighbor_data->parent_label = cur->label;\n nobody_was_discovered = 0;\n }\n }\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "s `min_active` to its neighbors.\n */\n DEBUG(\"broadcasting `min_active`s\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (data->committee == g->N+1)\n data->min_active = data->default_leader;\n else\n data->min_active = g->N+1;\n\n for 
(int j = 0; j < cur->degree; j++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, j));\n enqueue(active_ql, neighbor->label, &data->min_active);\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "adcasted `min_active`s.\n */\n DEBUG(\"receiving broadcasted transmissions\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n while(!is_ql_queue_empty(active_ql, i)) {\n int* active = dequeue(active_ql, i);\n data->min_active = min(data->min_active, *active);\n }\n\n data->leader = min(data->leader, data->min_active);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "* invite_ql) {\n DEBUG(\"starting selection\\n\");\n\n DEBUG(\"creating initial invitations\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (data->leader == data->default_leader) {\n data->invite.x = i;\n data->invite.y = data->min_active;\n }\n else {\n data->invite.x = g->N+1;\n data->invite.y = g->N+1;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "roadcast invitations to neighbors.\n */\n DEBUG(\"broadcasting invitations\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n for (int j = 0; j < cur->degree; j++) {\n node* neighbor = *((node**) elem_at(&cur->neighbors, j));\n enqueue(invite_ql, neighbor->label, &data->invite);\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "\n * smallest one.\n */\n DEBUG(\"receiving broadcasted invitations\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n while (!is_ql_queue_empty(invite_ql, i)) {\n invitation* invite = dequeue(invite_ql, i);\n min_invitation(&data->invite, invite);\n }\n\n // make sure the invite is for us\n if (data->invite.y == data->default_leader && data->invite.x == data->leader)\n data->committee = data->leader;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "legalize_committees(graph* g) {\n DEBUG(\"making sure there aren't any illegal committees\\n\");\n for (int i = 0; i < g->N; i++) {\n node* cur = elem_at(&g->vertices, i);\n payload* data = cur->data;\n\n if (data->committee >= g->N)\n data->committee = i;\n } #pragma omp parallel for 
schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " queuelist* send_ql) {\n DEBUG(\"Generating 2*%d messages for %d processes\\n\", N, N);\n\n for (int i = 0; i < N; i++) {\n process* p = processes+i;\n\n /**\n * If this node has been asked (or decided) to not send out any more\n * original messages (all nodes will always be up for propagating; but\n * creating new messages is a privilege that nodes might lose), don't\n * do anything.\n */\n if (p->status == -1)\n continue;\n\n message to_right = {i, 1 << l, 1, 0, 0};\n message to_left = {i, 1 << l, -1, 0, 0};\n\n enqueue(send_ql, i, &to_right);\n enqueue(send_ql, i, &to_left);\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": " queuelist* recv_ql) {\n DEBUG(\"propagating messages on phase %d\\n\", l);\n\n for (int i = 0; i < N; i++) {\n DEBUG(\"i = %d\\n\", i);\n process* p = processes+i;\n\n while (!is_ql_queue_empty(send_ql, i)) {\n message* m = dequeue(send_ql, i);\n DEBUG(\"m->starter_label = %d\\n\", m->starter_label);\n\n /**\n * If the starter_label is the current node, then ther are two\n * possibilities:\n * - this node has returned back home; increment status and be\n * done with that message\n * - this node never turned direction; this node is the winner\n * so make this the leader\n * \n * Otherwise the message reached the far end. It's time to change\n * direction, refresh the number of hops_left and go back.\n */\n if (m->starter_label == i && m->hops_left != (1 << l)) {\n if (m->stop_initiating)\n p->status = -1;\n else {\n if (m->direction_changed)\n p->status++;\n else {\n p->status = 3;\n break;\n }\n }\n continue;\n }\n\n if (m->hops_left == 0) {\n DEBUG(\"zero hops left\\n\");\n m->hops_left = 1 << l;\n m->direction *= -1;\n m->direction_changed = 1;\n }\n\n /**\n * Make sure this message is good enough to propagate. A message\n * passes through a node only if the origin is not lesser than\n * the current node's label. A message that passes through a node\n * in one direction _will_ pass through the same node when it's\n * coming back.\n *\n * When a node passes a message along, it can no longer win.\n * Therefore, it'll mark itself as status = -1, meaning that\n * it'll no longer start messages.\n *\n * If a message is not passed through (m->starter_label < i) then\n * the origin must be asked to not pass messages anymore.\n */\n if (m->starter_label < i) {\n /**\n * Of the (1 << l) hops the message intended to complete, it\n * has `hops_left` left, implying that it took\n * `(1 << l) - hops_left` hops to get here. 
It'll take exactly\n * the same number to go back to its origin.\n */\n m->hops_left = (1 << l) - m->hops_left;\n m->direction *= -1;\n m->direction_changed = 1;\n m->stop_initiating = 1;\n continue;\n }\n else {\n m->hops_left--;\n p->status = -1;\n }\n \n int next_label = (N + i + m->direction) % N;\n enqueue(recv_ql, next_label, m);\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "n will automatically set the number of elements to zero after\n * the last element.\n */\n\n for (int i = 0; i < N; i++) {\n process* p = processes+i;\n\n while (!is_ql_queue_empty(recv_ql, i)) {\n enqueue(send_ql, i, dequeue(recv_ql, i));\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "cesses\n * @n: number of processes\n */\nvoid receive_leaders(process* processes, int N) {\n for (int i = 0; i < N; i++) {\n int next = (i+1) % N;\n processes[next].received = processes[i].send;\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "sses\n * @n: number of processes\n */\nvoid determine_leaders(process* processes, int N) {\n for (int i = 0; i < N; i++) {\n if (processes[i].received > processes[i].leader) {\n processes[i].send = processes[i].received;\n processes[i].leader = processes[i].received;\n }\n else if (processes[i].received == processes[i].id) {\n processes[i].leader = processes[i].id;\n processes[i].status = 1;\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c", "omp_pragma_line": "#pragma omp parallel for schedule(SCHEDULING_METHOD)", "context_chars": 100, "text": "he chosen leader.\n */\nint identify_leader(process* processes, int N) {\n int chosen_id = -1;\n\n for (int i = 0; i < N; i++) {\n if (processes[i].status == 1) {\n chosen_id = i;\n // this will happen at most once\n }\n } #pragma omp parallel for schedule(SCHEDULING_METHOD)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/polybench-c-3.2/utilities/polybench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* flush = (double*) calloc (cs, sizeof(double));\n int i;\n double tmp = 0.0;\n#ifdef _OPENMP\nfor (i = 0; i < cs; i++)\n tmp += flush[i];\n assert (tmp <= 10.0);\n free (flush);\n}\n\n\n#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER\nvoid polybench_linux_fifo_scheduler()\n{\n /* Use FIFO scheduler to limit OS interference. Program must be run\n as root, and this works only for Linux kernels. 
*/\n struct sched_param schedParam;\n schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);\n sched_setscheduler (0, SCHED_FIFO, &schedParam);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "---------------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t} #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "-\nc Normalize z to obtain x\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n x[j] = norm_temp12*z[j];\n\t} #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "ing vector to (1, 1, .... 1)\nc-------------------------------------------------------------------*/\nfor (i = 1; i <= NA+1; i++) {\n x[i] = 1.0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "--------------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\n\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t} #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "-\nc Normalize z to obtain x\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n x[j] = norm_temp12*z[j];\n\t} #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": " to mark nonzero positions\nc---------------------------------------------------------------------*/\nfor (i = 1; i <= n; i++) {\n\tcolidx[n+i] = 0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "umber of triples in 
each row\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= n; j++) {\n\trowstr[j] = 0;\n\tmark[j] = FALSE;\n } #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(k,j)", "context_chars": 100, "text": " ... preload data pages\nc---------------------------------------------------------------------*/\nfor(j = 0;j <= nrows-1;j++) {\n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n } #pragma omp parallel for default(shared) private(k,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) ", "context_chars": 100, "text": "adding elements\nc-------------------------------------------------------------------*/\n nza = 0;\nfor (i = 1; i <= n; i++) {\n\tx[i] = 0.0;\n\tmark[i] = FALSE;\n } #pragma omp parallel for default(shared) private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/FT/ft-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k) ", "context_chars": 100, "text": "ier space\nc-------------------------------------------------------------------*/\n\n int i, j, k;\n\nfor (k = 0; k < d[2]; k++) {\n\tfor (j = 0; j < d[1]; j++) {\n for (i = 0; i < d[0]; i++) {\n\t crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i,j,k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/FT/ft-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) ", "context_chars": 100, "text": "ck:\nc mod(i-1+n/2, n) - n/2\nc-------------------------------------------------------------------*/\n\nfor (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/SP/sp-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "--------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/SP/sp-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "-------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/EP/ep-orig.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": " code.\n*/\n vranlc(0, &(dum[0]), dum[1], &(dum[2]));\n dum[0] = randlc(&(dum[1]), dum[2]);\n \nfor (i = 0; i < 2*NK; i++) x[i] = -1.0e99;\n \n Mops = log(sqrt(fabs(max(1.0, 1.0))));\n\n timer_clear(1);\n timer_clear(2);\n timer_clear(3);\n timer_start(1);\n\n vranlc(0, &t1, A, x);\n\n/* Compute AN = A ^ (2 * NK) (mod 2^46). */\n\n t1 = A;\n\n for ( i = 1; i <= MK+1; i++) {\n\tt2 = randlc(&t1, t1);\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) ", "context_chars": 100, "text": "---------------------------------------------------*/\n\n int i3, i2, i1;\n double r1[M], r2[M];\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)", "context_chars": 100, "text": "---------------------------------------------------*/\n\n int i3, i2, i1;\n double u1[M], u2[M];\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t }\n\t for (i1 = 1; i1 < 
n1-1; i1++) {\n\t\tr[i3][i2][i1] = v[i3][i2][i1]\n\t\t - a[0] * u[i3][i2][i1]\n/*--------------------------------------------------------------------\nc Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)\nc---------------------------------------------------------------------\nc > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)\nc > + u1(i1) )\nc-------------------------------------------------------------------*/\n\t\t- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )\n\t\t - a[3] * ( u2[i1-1] + u2[i1+1] );\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)", "context_chars": 100, "text": "lse {\n d2 = 1;\n }\n\n if (m3k == 3) {\n d3 = 2;\n } else {\n d3 = 1;\n }\nfor (j3 = 1; j3 < m3j-1; j3++) {\n\ti3 = 2*j3-d3;\n/*C i3 = 2*j3-1*/\n\tfor (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t }\n\n for (j1 = 1; j1 < m1j-1; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\ty2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]\n\t\t + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];\n\t\tx2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]\n\t\t + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];\n\t\ts[j3][j2][j1] =\n\t\t 0.5 * r[i3+1][i2+1][i1+1]\n\t\t + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)\n\t\t + 0.125 * ( x1[i1] + x1[i1+2] + y2)\n\t\t + 0.0625 * ( y1[i1] + y1[i1+2] );\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)", "context_chars": 100, "text": " parameter( m=535 )\n*/\n double z1[M], z2[M], z3[M];\n\n if ( n1 != 3 && n2 != 3 && n3 != 3 ) {\nfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]\n\t\t\t+z[i3][i2][i1];\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t}\n\t }\n\t} #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)", "context_chars": 100, "text": "--*/\n\n double s = 0.0;\n int i3, i2, i1, n;\n double a = 0.0, tmp = 0.0;\n\n n = nx*ny*nz;\n\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i2, i1) ", "context_chars": 100, "text": "ber\\n\");\n for (i = MM-1; i >= 0; i--) {\n\tprintf(\" %4d\", jg[0][i][1]);\n }\n printf(\"\\n\");*/\n\nfor (i3 = 0; i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i2, i1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1,i2,i3)", "context_chars": 100, "text": "-------\nc-------------------------------------------------------------------*/\n\n int i1, i2, i3;\nfor (i3 = 0;i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i1,i2,i3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": "->inputs);\n\n\tint h, j, k;\n\tdouble *mw;\n\tdouble sum;\n\n\tif (!ann->hidden_layers) {\n\t\tdouble *ret = o;\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\tmw = w + ((ann->inputs + 1) * j);\n\t\t\tsum = *mw++ * -1.0;\n\t\t\tfor (k = 0; k < ann->inputs; ++k) {\n\t\t\t\tsum += *mw++ * i[k];\n\t\t\t}\n\t\t\tdouble output = genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t} #pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": " genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t}\n\t\treturn ret;\n\t}\n\n\t/* Figure input layer */\nfor (j = 0; j < ann->hidden; ++j) {\n\t\tmw = w + ((ann->inputs + 1) * j);\n\t\tsum = *mw++ * -1.0;\n\t\tfor (k = 0; k < ann->inputs; ++k) {\n\t\t\tsum += *mw++ * i[k];\n\t\t}\n\t\tdouble output = genann_act_output(ann, sum);\n\t\t*(o + j) = output;\n\t} #pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": "o += ann->hidden;\n\n\t/* Figure hidden layers, if any. 
*/\n\tfor (h = 1; h < ann->hidden_layers; ++h) {\nfor (j = 0; j < ann->hidden; ++j) {\n\t\t\tmw = w + ((ann->hidden + 1) * j);\n\t\t\tsum = *mw++ * -1.0;\n\t\t\tfor (k = 0; k < ann->hidden; ++k) {\n\t\t\t\tsum += *mw++ * i[k];\n\t\t\t}\n\t\t\tdouble output = genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t} #pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": ");\n\t\ti += ann->hidden;\n\t\to += ann->hidden;\n\t}\n\n\tdouble const *ret = o;\n\n\t/* Figure output layer. */\nfor (j = 0; j < ann->outputs; ++j) {\n\t\tmw = w + ((ann->hidden + 1) * j);\n\t\tsum = *mw++ * -1.0;\n\t\tfor (k = 0; k < ann->hidden; ++k) {\n\t\t\tsum += *mw++ * i[k];\n\t\t}\n\t\tdouble output = genann_act_output(ann, sum);\n\t\t*(o + j) = output;\n\t} #pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d, t, o) private(j)", "context_chars": 100, "text": "/\n\t\tif (genann_act_output == genann_act_linear ||\n\t\t\tann->activation_output == genann_act_linear) {\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = *(t + j) - *(o + j);\n\t\t\t} #pragma omp parallel for firstprivate(d, t, o) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d, t, o) private(j)", "context_chars": 100, "text": "te(j)\n\t\t\tfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = *(t + j) - *(o + j);\n\t\t\t}\n\t\t}\n\t\telse {\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = (*(t + j) - *(o + j)) * *(o + j) * (1.0 - *(o + j));\n\t\t\t} #pragma omp parallel for firstprivate(d, t, o) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(o, d)", "context_chars": 100, "text": "st ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden + 1) * ann->hidden * (h));\n\nfor (j = 0; j < ann->hidden; ++j) {\n\n\t\t\tdouble delta = 0;\n\n\t\t\tfor (k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); ++k) {\n\t\t\t\tconst double forward_delta = dd[k];\n\t\t\t\tconst int windex = k * (ann->hidden + 1) + (j + 1);\n\t\t\t\tconst double forward_weight = ww[windex];\n\t\t\t\tdelta += forward_delta * forward_weight;\n\t\t\t}\n\n\t\t\t*(d + j) = *(o + j) * (1.0 - *(o + j)) * delta;\n\t\t} #pragma omp parallel for private(j, k) firstprivate(o, d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(d, w)", "context_chars": 100, "text": "ann->inputs + (ann->hidden) * (ann->hidden_layers - 1))\n\t\t\t: 0);\n\n\t\t/* Set output layer weights. */\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\tdouble *mw = w + ((ann->hidden_layers ? ann->hidden : ann->inputs) + 1) * j;\n\t\t\t*mw++ += *(d + j) * learning_rate * -1.0;\n\t\t\tfor (k = 1; k < (ann->hidden_layers ? 
ann->hidden : ann->inputs) + 1; ++k) {\n\t\t\t\t*mw++ += *(d + j) * learning_rate * i[k - 1];\n\t\t\t}\n\t\t} #pragma omp parallel for private(j, k) firstprivate(d, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(d, w)", "context_chars": 100, "text": "+ (h\n\t\t\t? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1))\n\t\t\t: 0);\n\nfor (j = 0; j < ann->hidden; ++j) {\n\t\t\tdouble *mw = w + ((h == 0 ? ann->inputs : ann->hidden) + 1) * j;\n\t\t\t*mw++ += *(d + j) * learning_rate * -1.0;\n\t\t\tfor (k = 1; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k) {\n\t\t\t\t*mw++ += *(d + j) * learning_rate * i[k - 1];\n\t\t\t}\n\t\t} #pragma omp parallel for private(j, k) firstprivate(d, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": "n->inputs);\n\n\tint h, j, k;\n\tdouble *mw;\n\tdouble sum;\n\tif (!ann->hidden_layers) {\n\t\tdouble *ret = o;\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\tmw = w + ((ann->inputs + 1) * j);\n\t\t\tsum = *mw++ * -1.0;\n\t\t\tfor (k = 0; k < ann->inputs; ++k) {\n\t\t\t\tsum += *mw++ * i[k];\n\t\t\t}\n\t\t\tdouble output = genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t} #pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": " genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t}\n\t\treturn ret;\n\t}\n\n\t/* Figure input layer */\nfor (j = 0; j < ann->hidden; ++j) {\n\t\tmw = w + ((ann->inputs + 1) * j);\n\t\tsum = *mw++ * -1.0;\n\t\tfor (k = 0; k < ann->inputs; ++k) {\n\t\t\tsum += *mw++ * i[k];\n\t\t}\n\t\tdouble output = genann_act_output(ann, sum);\n\t\t*(o + j) = output;\n\t} #pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": "o += ann->hidden;\n\n\t/* Figure hidden layers, if any. */\n\tfor (h = 1; h < ann->hidden_layers; ++h) {\nfor (j = 0; j < ann->hidden; ++j) {\n\t\t\tmw = w + ((ann->hidden + 1) * j);\n\t\t\tsum = *mw++ * -1.0;\n\t\t\tfor (k = 0; k < ann->hidden; ++k) {\n\t\t\t\tsum += *mw++ * i[k];\n\t\t\t}\n\t\t\tdouble output = genann_act_output(ann, sum);\n\t\t\t*(o + j) = output;\n\t\t} #pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)", "context_chars": 100, "text": ");\n\t\ti += ann->hidden;\n\t\to += ann->hidden;\n\t}\n\n\tdouble const *ret = o;\n\n\t/* Figure output layer. 
*/\nfor (j = 0; j < ann->outputs; ++j) {\n\t\tmw = w + ((ann->hidden + 1) * j);\n\t\tsum = *mw++ * -1.0;\n\t\tfor (k = 0; k < ann->hidden; ++k) {\n\t\t\tsum += *mw++ * i[k];\n\t\t}\n\t\tdouble output = genann_act_output(ann, sum);\n\t\t*(o + j) = output;\n\t} #pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d, t, o) private(j)", "context_chars": 100, "text": "/\n\t\tif (genann_act_output == genann_act_linear ||\n\t\t\tann->activation_output == genann_act_linear) {\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = *(t + j) - *(o + j);\n\t\t\t} #pragma omp parallel for firstprivate(d, t, o) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d, t, o) private(j)", "context_chars": 100, "text": "te(j)\n\t\t\tfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = *(t + j) - *(o + j);\n\t\t\t}\n\t\t}\n\t\telse {\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\t\t*(d + j) = (*(t + j) - *(o + j)) * *(o + j) * (1.0 - *(o + j));\n\t\t\t} #pragma omp parallel for firstprivate(d, t, o) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(o, d)", "context_chars": 100, "text": "st ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden + 1) * ann->hidden * (h));\n\nfor (j = 0; j < ann->hidden; ++j) {\n\n\t\t\tdouble delta = 0;\n\n\t\t\tfor (k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); ++k) {\n\t\t\t\tconst double forward_delta = dd[k];\n\t\t\t\tconst int windex = k * (ann->hidden + 1) + (j + 1);\n\t\t\t\tconst double forward_weight = ww[windex];\n\t\t\t\tdelta += forward_delta * forward_weight;\n\t\t\t}\n\n\t\t\t*(d + j) = *(o + j) * (1.0 - *(o + j)) * delta;\n\t\t} #pragma omp parallel for private(j, k) firstprivate(o, d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(d, w)", "context_chars": 100, "text": "ann->inputs + (ann->hidden) * (ann->hidden_layers - 1))\n\t\t\t: 0);\n\n\t\t/* Set output layer weights. */\nfor (j = 0; j < ann->outputs; ++j) {\n\t\t\tdouble *mw = w + ((ann->hidden_layers ? ann->hidden : ann->inputs) + 1) * j;\n\t\t\t*mw++ += *(d + j) * learning_rate * -1.0;\n\t\t\tfor (k = 1; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k) {\n\t\t\t\t*mw++ += *(d + j) * learning_rate * i[k - 1];\n\t\t\t}\n\t\t} #pragma omp parallel for private(j, k) firstprivate(d, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) firstprivate(d, w)", "context_chars": 100, "text": "+ (h\n\t\t\t? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1))\n\t\t\t: 0);\n\nfor (j = 0; j < ann->hidden; ++j) {\n\t\t\tdouble *mw = w + ((h == 0 ? ann->inputs : ann->hidden) + 1) * j;\n\t\t\t*mw++ += *(d + 1) * learning_rate * -1.0;\n\t\t\tfor (k = 1; k < (h == 0 ? 
ann->inputs : ann->hidden) + 1; ++k) {\n\t\t\t\t*mw++ += *(d + 1) * learning_rate * i[k - 1];\n\t\t\t}\n\t\t} #pragma omp parallel for private(j, k) firstprivate(d, w)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "size * 10;\n\t\t}\n\n\t\tgenann *save = genann_copy(ann);\n\n\t\t/* Take a random guess at the ANN weights. */\nfor (i = 0; i < ann->total_weights; ++i) {\n\t\t\tann->weight[i] += ((double)rand()) / RAND_MAX - 0.5;\n\t\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:err)", "context_chars": 100, "text": " ((double)rand()) / RAND_MAX - 0.5;\n\t\t}\n\n\t\t/* See how we did. */\n\t\terr = 0;\n\t\tint ind = 0, ans = 0;\nfor (i = 0; i < size; i++) {\n\t\t\tconst double *guess = genann_run(ann, inputs[i]);\n\t\t\terr += genann_difference(guess, desired_outputs[i], 10);\n\t\t} #pragma omp parallel for reduction(+:err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/billDrett/GameOfLife-MPI-OpenMp-Cuda-/openMP/openmpMasterThreadCommunication/main.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "neigbors\n {\n MPI_Start(&receiveReq[pos][i]);\n }\n\n //for(i = 2; i < nRows-2; i++) //calculate data inside the 2d array where we have all the data to find the new value of the cell\n {\n #pragma omp for\n for(j = 2; j #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/billDrett/GameOfLife-MPI-OpenMp-Cuda-/openMP/openmpParrallelCommunication/main.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "neigbors\n {\n MPI_Start(&receiveReq[pos][i]);\n }\n\n //for(i = 2; i < nRows-2; i++) //calculate data inside the 2d array where we have all the data to find the new value of the cell\n {\n #pragma omp for\n for(j = 2; j #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mperlet/matrix_multiplication/src/omp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t_matrix->cols, sizeof(double));\n\n // calculate the result matrix with omp (use pragma)\n \n for (int i = 0; i < result_matrix->rows; i++) {\n for (int j = 0; j < result_matrix->cols; j++) {\n for (int k = 0; k < m_1->cols; k++) {\n result_matrix->mat_data[i][j] += m_1->mat_data[i][k] * m_2->mat_data[k][j];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "add_pd(\n _mm256_add_pd(_mm256_add_pd(sum1, sum2), _mm256_add_pd(sum3, sum4)));\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "add_ps(\n 
_mm256_add_ps(_mm256_add_ps(sum1, sum2), _mm256_add_ps(sum3, sum4)));\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "c) {\n int i;\n // int num_t = omp_get _num_threads();\n int num_b = size / sizeof(unsigned long);\n\n#pragma ivdep\n //#pragma vector nontemporal (dst)\n for (i = 0; i < num_b; i++) {\n // memcpy((unsigned char *) dst+i, (unsigned char *) src+i, size);\n *((unsigned long *)dst + i) = *((unsigned long *)src + i);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ze);\n int i;\n // int num_t = omp_get _num_threads();\n int num_b = size / sizeof(unsigned long);\n\n#pragma ivdep\n //#pragma vector nontemporal (dst)\n for (i = 0; i < num_b; i++) {\n // memcpy((unsigned char *) dst+i, (unsigned char *) src+i, size);\n *((unsigned long *)dst + i) = *((unsigned long *)src + i);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "add_pd(\n _mm256_add_pd(_mm256_add_pd(sum1, sum2), _mm256_add_pd(sum3, sum4)));\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) 
reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "add_ps(\n _mm256_add_ps(_mm256_add_ps(sum1, sum2), _mm256_add_ps(sum3, sum4)));\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i;\n#ifdef USE_AVX\n#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)\n {\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)", "context_chars": 100, "text": "arallel for\n for (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data1[i] * data2[i];\n } #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 
100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)", "context_chars": 100, "text": ")\n {\n#pragma omp for\n for (i = 0; i < n; i++) {\n sum += data[i];\n }\n }\n#else\nfor (i = 0; i < n; i++) {\n sum += data[i];\n } #pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ntuhpc/training-ay1819/4-Parallel_Programming_Models/OpenMP/solution/vec_total_omp_buggy.cpp", "omp_pragma_line": "#pragma omp parallel for default(none) shared(v1, v2, result)", "context_chars": 100, "text": ";\n }\n cout<<\"\\n\";\n}\n\nint vec_sum(vector v1, vector v2)\n{\n int result=0;\n \n for (int i=0; i #pragma omp parallel for default(none) shared(v1, v2, result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ntuhpc/training-ay1819/4-Parallel_Programming_Models/OpenMP/solution/vec_total_omp.cpp", "omp_pragma_line": "#pragma omp parallel for default(none) shared(v1, v2) reduction (+:result)", "context_chars": 100, "text": ";\n }\n cout<<\"\\n\";\n}\n\nint vec_sum(vector v1, vector v2)\n{\n int result=0;\n \n\n for (int i=0; i #pragma omp parallel for default(none) shared(v1, v2) reduction (+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ntuhpc/training-ay1819/4-Parallel_Programming_Models/OpenMP/solution/vec_add_omp.cpp", "omp_pragma_line": "#pragma omp parallel for default(none) shared(v1,v2,result)", "context_chars": 100, "text": "\n\nvector vec_add(vector v1, vector v2)\n{\n vector result(v1.size());\n \n for (int i=0; i #pragma omp parallel for default(none) shared(v1,v2,result)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ntuhpc/training-ay1819/OpenMP/Example/program_with_bugs/omp_bug1.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "[N];\n\n/* Some initializations */\nfor (i=0; i < N; i++)\n a[i] = b[i] = i * 1.0;\nchunk = CHUNKSIZE;\n\nshared(a,b,c,chunk) \\\n private(i,tid) \\\n schedule(static,chunk)\n {\n tid = omp_get_thread_num();\n for (i=0; i < N; i++)\n {\n c[i] = a[i] + b[i];\n printf(\"tid= %d i= %d c[i]= %f\\n\", tid, i, c[i]);\n }\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ntuhpc/training-ay1819/OpenMP/Example/program_with_bugs/omp_bug1fix.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "ializations */\nfor (i=0; i < N; i++)\n a[i] = b[i] = i * 1.0;\nchunk = CHUNKSIZE;\nfirst_time = 'y';\n\nshared(a,b,c,chunk) \\\n private(i,tid) \\\n schedule(static,chunk) \\\n firstprivate(first_time)\n\n for (i=0; i < N; i++)\n {\n if (first_time == 'y')\n {\n tid = omp_get_thread_num();\n first_time = 'n';\n }\n c[i] = a[i] + b[i];\n printf(\"tid= %d i= %d c[i]= %f\\n\", tid, i, c[i]);\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lanl/libquo/demos/mpi-openmp/dgemv.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": " const double *restrict y,\n int64_t n)\n{\n double res = 0.0;\n int64_t i = 0;\n\ndefault(shared) private(i) \\\n schedule(static, DDOT_OMP_CHUNK_SIZE) \\\n reduction(+:res)\n for (i = 0; i < n; ++i) {\n res += x[i] * y[i];\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/starpu-runtime/starpu/examples/parallel_workers/parallel_workers.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "parallel\n\tfprintf(stderr, \"hello from the task %d\\n\", omp_get_thread_num());\n\tfor (k=0;k<10;k++)\n\t{\nfor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/starpu-runtime/starpu/examples/openmp/vector_scal_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(starpu_combined_worker_get_size())", "context_chars": 100, "text": "PTR(vector);\n\n\tFPRINTF(stderr, \"running task with %d CPUs.\\n\", starpu_combined_worker_get_size());\n\nfor (i = 0; i < n; i++)\n\t{\n\t\tfloat v = val[i];\n\t\tint j;\n\t\tfor (j = 0; j < 100; j++)\n\t\t\tv = v * f;\n\t\tval[i] = v;\n\t} #pragma omp parallel for num_threads(starpu_combined_worker_get_size())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/starpu-runtime/starpu/examples/sched_ctx/parallel_tasks_reuse_handle.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ET_PTR(descr[2]);\n\tint size = STARPU_VECTOR_GET_NX(descr[0]);\n\n\tint i, k;\n\tfor (k=0;kfor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/starpu-runtime/starpu/doc/doxygen/chapters/starpu_extensions/code/forkmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(starpu_combined_worker_get_size())", "context_chars": 100, "text": "nsigned n = STARPU_VECTOR_GET_NX(vector);\n float *val = (float *)STARPU_VECTOR_GET_PTR(vector);\n\nfor (i = 0; i < n; i++)\n val[i] *= *factor;\n}\n\nstatic struct starpu_codelet cl =\n{\n .modes = { STARPU_RW },\n .where = STARPU_CPU,\n .type = STARPU_FORKJOIN,\n .max_parallelism = INT_MAX,\n .cpu_funcs = 
{scal_cpu_func},\n .cpu_funcs_name = {\"scal_cpu_func\"},\n .nbuffers = 1,\n} #pragma omp parallel for num_threads(starpu_combined_worker_get_size())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/starpu-runtime/starpu/tests/main/combined_workers/bfs/bfs_func/bfs_omp_func.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(starpu_combined_worker_get_size())", "context_chars": 100, "text": "l stop;\n\tdo\n\t{\n\t\t//if no thread changes this value then the loop stops\n\t\tstop=false;\n\n#ifdef OPEN\n\t\tfor(int tid = 0; tid < no_of_nodes; tid++ )\n\t\t{\n\t\t\tif (graph_mask[tid] == true)\n\t\t\t{ \n\t\t\t\tgraph_mask[tid]=false;\n\t\t\t\tfor(int i=graph_nodes[tid].starting; i<(graph_nodes[tid].no_of_edges + graph_nodes[tid].starting); i++)\n\t\t\t\t{\n\t\t\t\t\tint id = graph_edges[i];\n\t\t\t\t\tif(!graph_visited[id])\n\t\t\t\t\t\t{\n\t\t\t\t\t\tcost[id]=cost[tid]+1;\n\t\t\t\t\t\tupdating_graph_mask[id]=true;\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} #pragma omp parallel for num_threads(starpu_combined_worker_get_size())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/OpenMP/C/01 Loop Decompose.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int *its_per_proc;\n\n nproc = omp_get_max_threads();\n its_per_proc = calloc(nproc, sizeof(int));\nfor (i = 0; i< MAX_ITS; ++i){\n thread_id = omp_get_thread_num();\n its_per_proc[thread_id] ++;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Collatz/C/collatz_omp_guided.c", "omp_pragma_line": "#pragma omp parallel for private(iint, itrial) shared(counts) schedule(guided)", "context_chars": 100, "text": " < NUMS; ++iint){\n counts[iint] = -1;\n }\n /*Loop over all numbers to start the sequence from*/\nfor (iint = 0; iint < NUMS; ++iint){\n icurr = iint + 1; /*Capture the value to start from*/\n /*Loop to a maximum number of iterations\n You can check if any are not converged after MAXITS by checking for\n the sentinal value of -1 in count*/\n for (itrial = 1; itrial <= MAXITS; ++itrial){\n /*If icurr is even divide by two*/\n if (icurr%2 == 0) {\n icurr = icurr / 2;\n } else {\n /*Otherwise multiply by 3 and add 1*/\n icurr = icurr * 3 + 1;\n }\n /*If the number reaches 1 then sequence has converged*/\n if (icurr == 1) {\n counts[iint] = itrial;\n break;\n }\n }\n } #pragma omp parallel for private(iint, itrial) shared(counts) schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Collatz/C/collatz_omp.c", "omp_pragma_line": "#pragma omp parallel for private(iint, itrial) shared(counts)", "context_chars": 100, "text": " < NUMS; ++iint){\n counts[iint] = -1;\n }\n /*Loop over all numbers to start the sequence from*/\nfor (iint = 0; iint < NUMS; ++iint){\n icurr = iint + 1; /*Capture the value to start from*/\n /*Loop to a maximum number of iterations\n You can check if any are not converged after MAXITS by checking for\n the sentinal value of -1 in count*/\n for (itrial = 1; itrial <= MAXITS; ++itrial){\n /*If icurr is even divide by two*/\n if (icurr%2 == 0) {\n icurr = icurr / 2;\n } else {\n /*Otherwise multiply by 3 and add 1*/\n icurr = icurr * 3 + 1;\n }\n /*If the number reaches 1 then sequence has converged*/\n if (icurr == 1) {\n counts[iint] = itrial;\n break;\n }\n }\n } #pragma omp parallel for private(iint, itrial) shared(counts)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Collatz/C/collatz_omp_dynamic.c", "omp_pragma_line": "#pragma omp parallel for private(iint, itrial) shared(counts) schedule(dynamic)", "context_chars": 100, "text": " < NUMS; ++iint){\n counts[iint] = -1;\n }\n /*Loop over all numbers to start the sequence from*/\nfor (iint = 0; iint < NUMS; ++iint){\n icurr = iint + 1; /*Capture the value to start from*/\n /*Loop to a maximum number of iterations\n You can check if any are not converged after MAXITS by checking for\n the sentinal value of -1 in count*/\n for (itrial = 1; itrial <= MAXITS; ++itrial){\n /*If icurr is even divide by two*/\n if (icurr%2 == 0) {\n icurr = icurr / 2;\n } else {\n /*Otherwise multiply by 3 and add 1*/\n icurr = icurr * 3 + 1;\n }\n /*If the number reaches 1 then sequence has converged*/\n if (icurr == 1) {\n counts[iint] = itrial;\n break;\n }\n }\n } #pragma omp parallel for private(iint, itrial) shared(counts) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Prime/C/prime_omp_guided.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:ct) schedule(guided)", "context_chars": 100, "text": "ll be accumulated on each thread individually\nand then summed at the end to get the total count */\n\nfor (i = 2; i<=20000000; ++i){\n if (check_prime(i))ct = ct + 1;\n } #pragma omp parallel for private(i) reduction(+:ct) schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Prime/C/prime_omp_dynamic.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:ct) schedule(dynamic)", "context_chars": 100, "text": "ll be accumulated on each thread individually\nand then summed at the end to get the total count */\n\nfor (i = 2; i<=20000000; ++i){\n if (check_prime(i))ct = ct + 1;\n } #pragma omp parallel for private(i) reduction(+:ct) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WarwickRSE/ParallelismPrimer/Examples/Prime/C/prime_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:ct)", "context_chars": 100, "text": "ll be accumulated on each thread individually\nand then summed at the end to get the total count */\n\nfor (i = 2; i<=20000000; ++i){\n if (check_prime(i))ct = ct + 1;\n } #pragma omp parallel for private(i) reduction(+:ct)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/trmm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(x) < (y) ? (x) : (y))\n#define max(x,y) ((x) > (y) ? 
(x) : (y))\n#pragma scop\nif (_PB_M >= 1) {\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n if (_PB_M >= 2) {\n const int ii0_prim_lb = 0, ii0_prim_ub = (_PB_M - 1) / 8;\n for (register int ii0_prim = ii0_prim_lb; ii0_prim <= ii0_prim_ub; ii0_prim += 1) {\n const int ii3_prim_lb = ii0_prim, ii3_prim_ub = (_PB_M - 1) / 8;\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i0_lb = 8 * ii0_prim, i0_ub = min(min(_PB_M - 2, 8 * ii0_prim + 7), 8 * ii3_prim + 6);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i3_lb = max(8 * ii3_prim, i0 + 1), i3_ub = min(_PB_M - 1, 8 * ii3_prim + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n B[i0][ir1] += (A[i3][i0] * B[i3][ir1]);\n }\n }\n }\n if (ii0_prim >= 1) {\n const int i0_lb = 8 * ii0_prim, i0_ub = min(_PB_M - 1, 8 * ii0_prim + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n B[i0][ir1] = (alpha * B[i0][ir1]);\n }\n } else {\n const int i0_lb = 0, i0_ub = min(7, _PB_M - 1);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n B[i0][ir1] = (alpha * B[i0][ir1]);\n }\n }\n }\n } else {\n B[0][ir1] = (alpha * B[0][ir1]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/gemm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ((x) < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nfor (register int ir0 = 0; ir0 < _PB_NI; ir0 += 1) {\n for (register int ir2 = 0; ir2 < _PB_NJ; ir2 += 1) {\n C[ir0][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NK - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * B[i2][ir2]);\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ling --omp-for-codegen --debug -b 8 */\n#define min(x,y) ((x) < (y) ? 
(x) : (y))\n#pragma scop\n{\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x1[ir1] = (x1[ir1] + (A[ir1][i2] * y_1[i2]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "; i2 <= i2_ub; i2 += 1) {\n x1[ir1] = (x1[ir1] + (A[ir1][i2] * y_1[i2]));\n }\n }\n }\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x2[ir1] = (x2[ir1] + (A[i2][ir1] * y_2[i2]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n for (register int ir1 = 0; ir1 < _PB_M; ir1 += 1) {\n s[ir1] = 0;\n const int ii1_prim_lb = 0, ii1_prim_ub = floord(_PB_N - 1, 8);\n for (register int ii1_prim = ii1_prim_lb; ii1_prim <= ii1_prim_ub; ii1_prim += 1) {\n const int i1_lb = 8 * ii1_prim, i1_ub = min(_PB_N - 1, 8 * ii1_prim + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n s[ir1] = (s[ir1] + (r[i1] * A[i1][ir1]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "1_lb; i1 <= i1_ub; i1 += 1) {\n s[ir1] = (s[ir1] + (r[i1] * A[i1][ir1]));\n }\n }\n }\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n q[ir1] = SCALAR_VAL(0.0);\n const int ii3_prim_lb = 0, ii3_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i3_lb = 8 * ii3_prim, i3_ub = min(_PB_M - 1, 8 * ii3_prim + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n q[ir1] = (q[ir1] + (A[ir1][i3] * p[i3]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/syr2k.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ((x) < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nfor (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n for (register int ir2 = 0; ir2 < _PB_N; ir2 += 1) {\n C[ir1][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir1][ir2] += (((A[ir2][i2] * alpha) * B[ir1][i2]) + ((B[ir2][i2] * alpha) * A[ir1][i2]));\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "heduling --omp-for-codegen --debug -b 8 */\n#define min(x,y) ((x) < (y) ? (x) : (y))\n#pragma scop\nfor (register int ir0 = 0; ir0 < _PB_N; ir0 += 1) {\n {\n tmp[ir0] = SCALAR_VAL(0.0);\n y[ir0] = SCALAR_VAL(0.0);\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[ir0] = ((A[ir0][i2] * x[i2]) + tmp[ir0]);\n }\n const int i2_lb2 = 8 * ii2_prim, i2_ub2 = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb2; i2 <= i2_ub2; i2 += 1) {\n y[ir0] = ((B[ir0][i2] * x[i2]) + y[ir0]);\n }\n }\n }\n y[ir0] = ((alpha * tmp[ir0]) + (beta * y[ir0]));\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ir0_lb = 0, ir0_ub = _PB_N - 7;\n for (register int ir0 = ir0_lb; ir0 < ir0_ub; ir0 += 1) {\n for (register int ir2 = 0; ir2 <= ir0; ir2 += 1) {\n C[ir0][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[ir2][i2]);\n }\n }\n }\n }\n const int ir0_lb2 = max(0, _PB_N - 7), ir0_ub2 = _PB_N;\n for (register int ir0 = ir0_lb2; ir0 < ir0_ub2; ir0 += 1) {\n for (register int ir2 = 0; ir2 <= ir0; ir2 += 1) {\n if (_PB_N >= ((7 * ir0 + 7) % 8) + ir0 + 1) {\n C[ir0][ir2] *= beta;\n } else {\n C[ir0][ir2] *= beta;\n }\n if (_PB_N >= ((7 * ir0 + 7) % 8) + ir0 + 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n } else {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y))\n#define floord(n,d) (((n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nif (_PB_NJ >= 1) {\n for (register int ir1 = 0; ir1 < _PB_NI; ir1 += 1) {\n if (_PB_NL >= 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NJ - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NJ - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[ir1][i2] = SCALAR_VAL(0.0);\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NJ - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NK - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[ir1][i2] += ((alpha * A[ir1][i4]) * B[i4][i2]);\n }\n }\n }\n }\n } else {\n for (register int ir2 = 0; ir2 < _PB_NJ; ir2 += 1) {\n tmp[ir1][ir2] = SCALAR_VAL(0.0);\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NK - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[ir1][ir2] += ((alpha * A[ir1][i4]) * B[i4][ir2]);\n }\n }\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NL - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NL - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[ir1][i2] *= beta;\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NJ - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NL - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NJ - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n D[ir1][i2] += (tmp[ir1][i4] * C[i4][i2]);\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " D[ir1][i2] += (tmp[ir1][i4] * C[i4][i2]);\n }\n }\n }\n }\n }\n} else {\n for (register int ir1 = 0; ir1 < _PB_NI; ir1 += 1) {\n for (register int ir2 = 0; ir2 < _PB_NL; ir2 += 1) {\n D[ir1][ir2] *= beta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/trmm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(x) < (y) ? (x) : (y))\n#define max(x,y) ((x) > (y) ? 
(x) : (y))\n#pragma scop\nif (_PB_M >= 1) {\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n if (_PB_M >= 2) {\n const int ii0_prim_lb = 0, ii0_prim_ub = (_PB_M - 1) / 32;\n for (register int ii0_prim = ii0_prim_lb; ii0_prim <= ii0_prim_ub; ii0_prim += 1) {\n const int ii3_prim_lb = ii0_prim, ii3_prim_ub = (_PB_M - 1) / 32;\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i0_lb = 32 * ii0_prim, i0_ub = min(min(_PB_M - 2, 32 * ii0_prim + 31), 32 * ii3_prim + 30);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i3_lb = max(32 * ii3_prim, i0 + 1), i3_ub = min(_PB_M - 1, 32 * ii3_prim + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n B[i0][ir1] += (A[i3][i0] * B[i3][ir1]);\n }\n }\n }\n if (ii0_prim >= 1) {\n const int i0_lb = 32 * ii0_prim, i0_ub = min(_PB_M - 1, 32 * ii0_prim + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n B[i0][ir1] = (alpha * B[i0][ir1]);\n }\n } else {\n const int i0_lb = 0, i0_ub = min(31, _PB_M - 1);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n B[i0][ir1] = (alpha * B[i0][ir1]);\n }\n }\n }\n } else {\n B[0][ir1] = (alpha * B[0][ir1]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/gemm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ((x) < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nfor (register int ir0 = 0; ir0 < _PB_NI; ir0 += 1) {\n for (register int ir2 = 0; ir2 < _PB_NJ; ir2 += 1) {\n C[ir0][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NK - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * B[i2][ir2]);\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ing --omp-for-codegen --debug -b 32 */\n#define min(x,y) ((x) < (y) ? 
(x) : (y))\n#pragma scop\n{\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x1[ir1] = (x1[ir1] + (A[ir1][i2] * y_1[i2]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "; i2 <= i2_ub; i2 += 1) {\n x1[ir1] = (x1[ir1] + (A[ir1][i2] * y_1[i2]));\n }\n }\n }\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x2[ir1] = (x2[ir1] + (A[i2][ir1] * y_2[i2]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n for (register int ir1 = 0; ir1 < _PB_M; ir1 += 1) {\n s[ir1] = 0;\n const int ii1_prim_lb = 0, ii1_prim_ub = floord(_PB_N - 1, 32);\n for (register int ii1_prim = ii1_prim_lb; ii1_prim <= ii1_prim_ub; ii1_prim += 1) {\n const int i1_lb = 32 * ii1_prim, i1_ub = min(_PB_N - 1, 32 * ii1_prim + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n s[ir1] = (s[ir1] + (r[i1] * A[i1][ir1]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "1_lb; i1 <= i1_ub; i1 += 1) {\n s[ir1] = (s[ir1] + (r[i1] * A[i1][ir1]));\n }\n }\n }\n for (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n q[ir1] = SCALAR_VAL(0.0);\n const int ii3_prim_lb = 0, ii3_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i3_lb = 32 * ii3_prim, i3_ub = min(_PB_M - 1, 32 * ii3_prim + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n q[ir1] = (q[ir1] + (A[ir1][i3] * p[i3]));\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/syr2k.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ((x) < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nfor (register int ir1 = 0; ir1 < _PB_N; ir1 += 1) {\n for (register int ir2 = 0; ir2 < _PB_N; ir2 += 1) {\n C[ir1][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir1][ir2] += (((A[ir2][i2] * alpha) * B[ir1][i2]) + ((B[ir2][i2] * alpha) * A[ir1][i2]));\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "eduling --omp-for-codegen --debug -b 32 */\n#define min(x,y) ((x) < (y) ? (x) : (y))\n#pragma scop\nfor (register int ir0 = 0; ir0 < _PB_N; ir0 += 1) {\n {\n tmp[ir0] = SCALAR_VAL(0.0);\n y[ir0] = SCALAR_VAL(0.0);\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[ir0] = ((A[ir0][i2] * x[i2]) + tmp[ir0]);\n }\n const int i2_lb2 = 32 * ii2_prim, i2_ub2 = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb2; i2 <= i2_ub2; i2 += 1) {\n y[ir0] = ((B[ir0][i2] * x[i2]) + y[ir0]);\n }\n }\n }\n y[ir0] = ((alpha * tmp[ir0]) + (beta * y[ir0]));\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ir0_lb = 0, ir0_ub = _PB_N - 31;\n for (register int ir0 = ir0_lb; ir0 < ir0_ub; ir0 += 1) {\n for (register int ir2 = 0; ir2 <= ir0; ir2 += 1) {\n C[ir0][ir2] *= beta;\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ir2][i2]);\n }\n }\n }\n }\n const int ir0_lb2 = max(0, _PB_N - 31), ir0_ub2 = _PB_N;\n for (register int ir0 = ir0_lb2; ir0 < ir0_ub2; ir0 += 1) {\n for (register int ir2 = 0; ir2 <= ir0; ir2 += 1) {\n if (_PB_N >= ((31 * ir0 + 31) % 32) + ir0 + 1) {\n C[ir0][ir2] *= beta;\n } else {\n C[ir0][ir2] *= beta;\n }\n if (_PB_N >= ((31 * ir0 + 31) % 32) + ir0 + 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n } else {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[ir0][ir2] += ((alpha * A[ir0][i2]) * A[ir2][i2]);\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y))\n#define floord(n,d) (((n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nif (_PB_NJ >= 1) {\n for (register int ir1 = 0; ir1 < _PB_NI; ir1 += 1) {\n if (_PB_NL >= 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NJ - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NJ - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[ir1][i2] = SCALAR_VAL(0.0);\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NJ - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NK - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[ir1][i2] += ((alpha * A[ir1][i4]) * B[i4][i2]);\n }\n }\n }\n }\n } else {\n for (register int ir2 = 0; ir2 < _PB_NJ; ir2 += 1) {\n tmp[ir1][ir2] = SCALAR_VAL(0.0);\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NK - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[ir1][ir2] += ((alpha * A[ir1][i4]) * B[i4][ir2]);\n }\n }\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NL - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NL - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[ir1][i2] *= beta;\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NJ - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NL - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NJ - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n D[ir1][i2] += (tmp[ir1][i4] * C[i4][i2]);\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-single/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " D[ir1][i2] += (tmp[ir1][i4] * C[i4][i2]);\n }\n }\n }\n }\n }\n} else {\n for (register int ir1 = 0; ir1 < _PB_NI; ir1 += 1) {\n for (register int ir2 = 0; ir2 < _PB_NL; ir2 += 1) {\n D[ir1][ir2] *= beta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/trmm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " : (n)/(d))\n#pragma scop\nif (_PB_M >= 1) {\n const int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 8);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n if (_PB_M >= 2) {\n const int ii0_prim_lb = 0, ii0_prim_ub = (_PB_M - 1) / 8;\n for (register int ii0_prim = ii0_prim_lb; ii0_prim <= ii0_prim_ub; ii0_prim += 1) {\n const int ii3_prim_lb = ii0_prim, ii3_prim_ub = (_PB_M - 1) / 8;\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i0_lb = 8 * ii0_prim, i0_ub = min(min(_PB_M - 2, 8 * ii0_prim + 7), 8 * ii3_prim + 6);\n for (register int i0 = i0_lb; i0 <= 
i0_ub; i0 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = max(8 * ii3_prim, i0 + 1), i3_ub = min(_PB_M - 1, 8 * ii3_prim + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n B[i0][i1] += (A[i3][i0] * B[i3][i1]);\n }\n }\n }\n }\n if (ii0_prim >= 1) {\n const int i0_lb = 8 * ii0_prim, i0_ub = min(_PB_M - 1, 8 * ii0_prim + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[i0][i1] = (alpha * B[i0][i1]);\n }\n }\n } else {\n const int i0_lb = 0, i0_ub = min(7, _PB_M - 1);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[i0][i1] = (alpha * B[i0][i1]);\n }\n }\n }\n }\n } else {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[0][i1] = (alpha * B[0][i1]);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/gemm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_NI - 1, 8);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NJ - 1, 8);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_NI - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(_PB_NJ - 1, 8 * ii2 + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i0][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_NI - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NK - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 8 * ii2, i3_ub = min(_PB_NJ - 1, 8 * ii2 + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i0][i3] += ((alpha * A[i0][i2]) * B[i2][i3]);\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 8);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x1[i1] = (x1[i1] + (A[i1][i2] * y_1[i2]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* y_1[i2]));\n }\n }\n }\n }\n const int ii1_lb2 = 0, ii1_ub2 = floord(_PB_N - 1, 8);\n for (register int ii1 = ii1_lb2; ii1 <= ii1_ub2; ii1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x2[i1] = (x2[i1] + (A[i2][i1] * y_2[i2]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ii1_lb = 0, ii1_ub = floord(_PB_M - 1, 8);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_M - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n s[i1] = 0;\n }\n const int ii1_prim_lb = 0, ii1_prim_ub = floord(_PB_N - 1, 8);\n for (register int ii1_prim = ii1_prim_lb; ii1_prim <= ii1_prim_ub; ii1_prim += 1) {\n const int i1_lb = 8 * ii1_prim, i1_ub = min(_PB_N - 1, 8 * ii1_prim + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = 8 * ii1, i3_ub = min(_PB_M - 1, 8 * ii1 + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n s[i3] = (s[i3] + (r[i1] * A[i1][i3]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "A[i1][i3]));\n }\n }\n }\n }\n const int ii1_lb2 = 0, ii1_ub2 = floord(_PB_N - 1, 8);\n for (register int ii1 = ii1_lb2; ii1 <= ii1_ub2; ii1 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n q[i1] = SCALAR_VAL(0.0);\n }\n const int ii3_prim_lb = 0, ii3_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = 8 * ii3_prim, i3_ub = min(_PB_M - 1, 8 * ii3_prim + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n q[i1] = (q[i1] + (A[i1][i3] * p[i3]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/syr2k.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 8);\nfor (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_lb = 0, ii2_ub = (_PB_N - 1) / 8;\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(_PB_N - 1, 8 * ii2 + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i1][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_N - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 8 * ii2, i3_ub = min(_PB_N - 1, 8 * ii2 + 7);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i1][i3] += (((A[i3][i2] * alpha) * B[i1][i2]) + ((B[i3][i2] * alpha) * A[i1][i2]));\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_N - 1, 8);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n tmp[i0] = SCALAR_VAL(0.0);\n }\n const int i0_lb2 = 8 * ii0, i0_ub2 = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb2; i0 <= i0_ub2; i0 += 1) {\n y[i0] = SCALAR_VAL(0.0);\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 8;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i0] = ((A[i0][i2] * x[i2]) + tmp[i0]);\n }\n }\n const int i0_lb2 = 8 * ii0, i0_ub2 = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb2; i0 <= i0_ub2; i0 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_N - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n y[i0] = ((B[i0][i2] * x[i2]) + y[i0]);\n }\n }\n }\n }\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n y[i0] = ((alpha * tmp[i0]) + (beta * y[i0]));\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_N - 1, 8);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n for (register int ii2 = 0; ii2 <= ii0; ii2 += 1) {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(8 * ii2 + 7, i0);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i0][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 8 * ii0, i0_ub = min(_PB_N - 1, 8 * ii0 + 7);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_M - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 8 * ii2, i3_ub = min(8 * ii2 + 7, i0);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i0][i3] += ((alpha * A[i0][i2]) * A[i3][i2]);\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (n)/(d))\n#pragma scop\nif (_PB_NJ >= 1) {\n const int ii1_lb = 0, ii1_ub = floord(_PB_NI - 1, 8);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n if (_PB_NL >= 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NJ - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NJ - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i1][i2] = SCALAR_VAL(0.0);\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NJ - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NK - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[i1][i2] += ((alpha * A[i1][i4]) * B[i4][i2]);\n }\n }\n }\n }\n }\n } else {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NJ - 1, 8);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(_PB_NJ - 1, 8 * ii2 + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i1][i2] = SCALAR_VAL(0.0);\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(_PB_NJ - 1, 8 * ii2 + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NK - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[i1][i2] += ((alpha * A[i1][i4]) * 
B[i4][i2]);\n }\n }\n }\n }\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NL - 1, 8);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NL - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[i1][i2] *= beta;\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NJ - 1, 8);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2_prim, i2_ub = min(_PB_NL - 1, 8 * ii2_prim + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 8 * ii4_prim, i4_ub = min(_PB_NJ - 1, 8 * ii4_prim + 7);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n D[i1][i2] += (tmp[i1][i4] * C[i4][i2]);\n }\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n }\n }\n }\n }\n} else {\n const int ii1_lb = 0, ii1_ub = floord(_PB_NI - 1, 8);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NL - 1, 8);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 8 * ii1, i1_ub = min(_PB_NI - 1, 8 * ii1 + 7);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 8 * ii2, i2_ub = min(_PB_NL - 1, 8 * ii2 + 7);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[i1][i2] *= beta;\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/trmm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ": (n)/(d))\n#pragma scop\nif (_PB_M >= 1) {\n const int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 32);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n if (_PB_M >= 2) {\n const int ii0_prim_lb = 0, ii0_prim_ub = (_PB_M - 1) / 32;\n for (register int ii0_prim = ii0_prim_lb; ii0_prim <= ii0_prim_ub; ii0_prim += 1) {\n const int ii3_prim_lb = ii0_prim, ii3_prim_ub = (_PB_M - 1) / 32;\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i0_lb = 32 * ii0_prim, i0_ub = min(min(_PB_M - 2, 32 * ii0_prim + 31), 32 * ii3_prim + 30);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = max(32 * ii3_prim, i0 + 1), i3_ub = min(_PB_M - 1, 32 * ii3_prim + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n B[i0][i1] += (A[i3][i0] * B[i3][i1]);\n }\n }\n }\n }\n if (ii0_prim >= 1) {\n const int i0_lb = 32 * ii0_prim, i0_ub = min(_PB_M - 1, 32 * ii0_prim + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[i0][i1] = (alpha * B[i0][i1]);\n }\n }\n } else {\n const int i0_lb = 0, i0_ub = min(31, _PB_M - 1);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i1_lb = 
32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[i0][i1] = (alpha * B[i0][i1]);\n }\n }\n }\n }\n } else {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n B[0][i1] = (alpha * B[0][i1]);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/gemm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_NI - 1, 32);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NJ - 1, 32);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_NI - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(_PB_NJ - 1, 32 * ii2 + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i0][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_NI - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NK - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 32 * ii2, i3_ub = min(_PB_NJ - 1, 32 * ii2 + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i0][i3] += ((alpha * A[i0][i2]) * B[i2][i3]);\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 32);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x1[i1] = (x1[i1] + (A[i1][i2] * y_1[i2]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/mvt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " y_1[i2]));\n }\n }\n }\n }\n const int ii1_lb2 = 0, ii1_ub2 = floord(_PB_N - 1, 32);\n for (register int ii1 = ii1_lb2; ii1 <= ii1_ub2; ii1 += 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n x2[i1] = (x2[i1] + (A[i2][i1] * y_2[i2]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n const int ii1_lb = 0, ii1_ub = floord(_PB_M - 1, 32);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_M - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n s[i1] = 0;\n }\n const int ii1_prim_lb = 0, ii1_prim_ub = floord(_PB_N - 1, 32);\n for (register int ii1_prim = ii1_prim_lb; ii1_prim <= ii1_prim_ub; ii1_prim += 1) {\n const int i1_lb = 32 * ii1_prim, i1_ub = min(_PB_N - 1, 32 * ii1_prim + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = 32 * ii1, i3_ub = min(_PB_M - 1, 32 * ii1 + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n s[i3] = (s[i3] + (r[i1] * A[i1][i3]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[i1][i3]));\n }\n }\n }\n }\n const int ii1_lb2 = 0, ii1_ub2 = floord(_PB_N - 1, 32);\n for (register int ii1 = ii1_lb2; ii1 <= ii1_ub2; ii1 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n q[i1] = SCALAR_VAL(0.0);\n }\n const int ii3_prim_lb = 0, ii3_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii3_prim = ii3_prim_lb; ii3_prim <= ii3_prim_ub; ii3_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i3_lb = 32 * ii3_prim, i3_ub = min(_PB_M - 1, 32 * ii3_prim + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n q[i1] = (q[i1] + (A[i1][i3] * p[i3]));\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/syr2k.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii1_lb = 0, ii1_ub = floord(_PB_N - 1, 32);\nfor (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_lb = 0, ii2_ub = (_PB_N - 1) / 32;\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(_PB_N - 1, 32 * ii2 + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i1][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_N - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 32 * ii2, i3_ub = min(_PB_N - 1, 32 * ii2 + 31);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i1][i3] += (((A[i3][i2] * alpha) * B[i1][i2]) + ((B[i3][i2] * alpha) * A[i1][i2]));\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "? -((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_N - 1, 32);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n tmp[i0] = SCALAR_VAL(0.0);\n }\n const int i0_lb2 = 32 * ii0, i0_ub2 = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb2; i0 <= i0_ub2; i0 += 1) {\n y[i0] = SCALAR_VAL(0.0);\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = (_PB_N - 1) / 32;\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i0] = ((A[i0][i2] * x[i2]) + tmp[i0]);\n }\n }\n const int i0_lb2 = 32 * ii0, i0_ub2 = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb2; i0 <= i0_ub2; i0 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_N - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n y[i0] = ((B[i0][i2] * x[i2]) + y[i0]);\n }\n }\n }\n }\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n y[i0] = ((alpha * tmp[i0]) + (beta * y[i0]));\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/syrk.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\nconst int ii0_lb = 0, ii0_ub = floord(_PB_N - 1, 32);\nfor (register int ii0 = ii0_lb; ii0 <= ii0_ub; ii0 += 1) {\n for (register int ii2 = 0; ii2 <= ii0; ii2 += 1) {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(32 * ii2 + 31, i0);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n C[i0][i2] *= beta;\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_M - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i0_lb = 32 * ii0, i0_ub = min(_PB_N - 1, 32 * ii0 + 31);\n for (register int i0 = i0_lb; i0 <= i0_ub; i0 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_M - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i3_lb = 32 * ii2, i3_ub = min(32 * ii2 + 31, i0);\n for (register int i3 = i3_lb; i3 <= i3_ub; i3 += 1) {\n C[i0][i3] += ((alpha * A[i0][i2]) * A[i3][i2]);\n }\n }\n }\n }\n }\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(n)/(d))\n#pragma scop\nif (_PB_NJ >= 1) {\n const int ii1_lb = 0, ii1_ub = floord(_PB_NI - 1, 32);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n if (_PB_NL >= 1) {\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NJ - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NJ - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i1][i2] = SCALAR_VAL(0.0);\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NJ - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NK - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n tmp[i1][i2] += ((alpha * A[i1][i4]) * B[i4][i2]);\n }\n }\n }\n }\n }\n } else {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NJ - 1, 32);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(_PB_NJ - 1, 32 * ii2 + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n tmp[i1][i2] = SCALAR_VAL(0.0);\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NK - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(_PB_NJ - 1, 32 * ii2 + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NK - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 
1) {\n tmp[i1][i2] += ((alpha * A[i1][i4]) * B[i4][i2]);\n }\n }\n }\n }\n }\n }\n const int ii2_prim_lb = 0, ii2_prim_ub = floord(_PB_NL - 1, 32);\n for (register int ii2_prim = ii2_prim_lb; ii2_prim <= ii2_prim_ub; ii2_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NL - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[i1][i2] *= beta;\n }\n }\n const int ii4_prim_lb = 0, ii4_prim_ub = floord(_PB_NJ - 1, 32);\n for (register int ii4_prim = ii4_prim_lb; ii4_prim <= ii4_prim_ub; ii4_prim += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2_prim, i2_ub = min(_PB_NL - 1, 32 * ii2_prim + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n const int i4_lb = 32 * ii4_prim, i4_ub = min(_PB_NJ - 1, 32 * ii4_prim + 31);\n for (register int i4 = i4_lb; i4 <= i4_ub; i4 += 1) {\n D[i1][i2] += (tmp[i1][i4] * C[i4][i2]);\n }\n }\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/tc-multi/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n }\n }\n }\n }\n} else {\n const int ii1_lb = 0, ii1_ub = floord(_PB_NI - 1, 32);\n for (register int ii1 = ii1_lb; ii1 <= ii1_ub; ii1 += 1) {\n const int ii2_lb = 0, ii2_ub = floord(_PB_NL - 1, 32);\n for (register int ii2 = ii2_lb; ii2 <= ii2_ub; ii2 += 1) {\n const int i1_lb = 32 * ii1, i1_ub = min(_PB_NI - 1, 32 * ii1 + 31);\n for (register int i1 = i1_lb; i1 <= i1_ub; i1 += 1) {\n const int i2_lb = 32 * ii2, i2_ub = min(_PB_NL - 1, 32 * ii2 + 31);\n for (register int i2 = i2_lb; i2 <= i2_ub; i2 += 1) {\n D[i1][i2] *= beta;\n }\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/trmm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "de */\nif ((_PB_M >= 1) && (_PB_N >= 1)) {\n if (_PB_M >= 2) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_M-2,8);t3++) {\n for (t4=t3;t4<=floord(_PB_M-1,8);t4++) {\n for (t5=8*t3;t5<=min(min(_PB_M-2,8*t3+7),8*t4+6);t5++) {\n for (t6=max(8*t4,t5+1);t6<=min(_PB_M-1,8*t4+7);t6++) {\n lbv=8*t2;\n ubv=min(_PB_N-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t7=lbv;t7<=ubv;t7++) {\n B[t5][t7] += A[t6][t5] * B[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/trmm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_M-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_M-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_N-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n B[t4][t5] = alpha * B[t4][t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gemm.c", 
"omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "\n/* Start of CLooG code */\nif ((_PB_NI >= 1) && (_PB_NJ >= 1)) {\n lbp=0;\n ubp=floord(_PB_NI-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gemm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "] *= beta;;\n }\n }\n }\n }\n if (_PB_NK >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,8);t3++) {\n for (t4=0;t4<=floord(_PB_NK-1,8);t4++) {\n for (t5=8*t2;t5<=min(_PB_NI-1,8*t2+7);t5++) {\n for (t6=8*t4;t6<=min(_PB_NK-1,8*t4+7);t6++) {\n lbv=8*t3;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t7=lbv;t7<=ubv;t7++) {\n C[t5][t7] += alpha * A[t5][t6] * B[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/mvt.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t2,t3,t4)", "context_chars": 100, "text": "egister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t1=lbp;t1<=ubp;t1++) {\n for (t2=0;t2<=floord(_PB_N-1,8);t2++) {\n for (t3=8*t1;t3<=min(_PB_N-1,8*t1+7);t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n x1[t3] = x1[t3] + A[t3][t4] * y_1[t4];;\n x2[t3] = x2[t3] + A[t4][t3] * y_2[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t2,t3,t4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "lbp, ubp, lb2, ub2;\n register int lbv, ubv;\n/* Start of CLooG code */\nlbp=0;\nubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=8*t2;\n ubv=min(_PB_N-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n q[t3] = SCALAR_VAL(0.0);;\n }\n} #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "ubv;t3++) {\n q[t3] = SCALAR_VAL(0.0);;\n }\n}\nif (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_M-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n for (t5=8*t3;t5<=min(_PB_M-1,8*t3+7);t5++) {\n q[t4] = q[t4] + A[t4][t5] * p[t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": " q[t4] = q[t4] + A[t4][t5] * p[t5];;\n }\n }\n }\n }\n}\nlbp=0;\nubp=floord(_PB_M-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=8*t2;\n 
ubv=min(_PB_M-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n s[t3] = 0;;\n }\n} #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "r (t3=lbv;t3<=ubv;t3++) {\n s[t3] = 0;;\n }\n}\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_M-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=8*t3;t4<=min(_PB_N-1,8*t3+7);t4++) {\n lbv=8*t2;\n ubv=min(_PB_M-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n s[t5] = s[t5] + r[t4] * A[t4][t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/syr2k.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "egister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_N-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/syr2k.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "t5] *= beta;;\n }\n }\n }\n }\n if (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=0;t4<=floord(_PB_M-1,8);t4++) {\n for (t5=8*t2;t5<=min(_PB_N-1,8*t2+7);t5++) {\n for (t6=8*t3;t6<=min(_PB_N-1,8*t3+7);t6++) {\n for (t7=8*t4;t7<=min(_PB_M-1,8*t4+7);t7++) {\n C[t5][t6] += A[t6][t7] * alpha * B[t5][t7] + B[t6][t7] * alpha * A[t5][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "egister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=8*t2;\n ubv=min(_PB_N-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n y[t3] = SCALAR_VAL(0.0);;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "(t3=lbv;t3<=ubv;t3++) {\n y[t3] = SCALAR_VAL(0.0);;\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n for (t5=8*t3;t5<=min(_PB_N-1,8*t3+7);t5++) {\n y[t4] = B[t4][t5] * x[t5] + y[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": " y[t4] = B[t4][t5] * x[t5] + y[t4];;\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=8*t2;\n ubv=min(_PB_N-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n tmp[t3] = SCALAR_VAL(0.0);;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "3=lbv;t3<=ubv;t3++) {\n tmp[t3] = SCALAR_VAL(0.0);;\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n for (t5=8*t3;t5<=min(_PB_N-1,8*t3+7);t5++) {\n tmp[t4] = A[t4][t5] * x[t5] + tmp[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "p[t4] = A[t4][t5] * x[t5] + tmp[t4];;\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=8*t2;\n ubv=min(_PB_N-1,8*t2+7);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n y[t3] = alpha * tmp[t3] + beta * y[t3];;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/syrk.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "egister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=t2;t3++) {\n for (t4=8*t2;t4<=min(_PB_N-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(t4,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/syrk.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "t5] *= beta;;\n }\n }\n }\n }\n if (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=t2;t3++) {\n for (t4=0;t4<=floord(_PB_M-1,8);t4++) {\n for (t5=8*t2;t5<=min(_PB_N-1,8*t2+7);t5++) {\n for (t6=8*t3;t6<=min(t5,8*t3+7);t6++) {\n for (t7=8*t4;t7<=min(_PB_M-1,8*t4+7);t7++) {\n C[t5][t6] += alpha * A[t5][t7] * A[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)", "context_chars": 100, "text": "ister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_NI >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n if ((_PB_NJ >= 0) && (_PB_NL >= 0)) {\n for (t3=0;t3<=floord(_PB_NJ+_PB_NL-1,8);t3++) 
{\n if ((_PB_NJ >= _PB_NL+1) && (t3 <= floord(_PB_NL-1,8)) && (t3 >= ceild(_PB_NL-7,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=_PB_NL-1;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n lbv=_PB_NL;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ >= _PB_NL+1) && (t3 <= floord(_PB_NL-8,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=8*t3+7;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ <= _PB_NL-1) && (t3 <= floord(_PB_NJ-1,8)) && (t3 >= ceild(_PB_NJ-7,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=_PB_NJ-1;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n lbv=_PB_NJ;\n ubv=min(_PB_NL-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n if ((_PB_NJ <= _PB_NL-1) && (t3 <= floord(_PB_NJ-8,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=8*t3+7;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ == _PB_NL) && (t3 <= floord(_PB_NJ-1,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((t3 <= floord(_PB_NJ-1,8)) && (t3 >= ceild(_PB_NL,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((t3 <= floord(_PB_NL-1,8)) && (t3 >= ceild(_PB_NJ,8))) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NL-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n }\n }\n if (_PB_NL <= -1) {\n for (t3=0;t3<=floord(_PB_NJ-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NJ-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n }\n if (_PB_NJ <= -1) {\n for (t3=0;t3<=floord(_PB_NL-1,8);t3++) {\n for (t4=8*t2;t4<=min(_PB_NI-1,8*t2+7);t4++) {\n lbv=8*t3;\n ubv=min(_PB_NL-1,8*t3+7);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/8/2mm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (_PB_NJ >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,8);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,8);t3++) {\n if (_PB_NK >= 1) {\n for (t5=0;t5<=floord(_PB_NK-1,8);t5++) {\n for (t6=8*t2;t6<=min(_PB_NI-1,8*t2+7);t6++) {\n for (t7=8*t3;t7<=min(_PB_NJ-1,8*t3+7);t7++) {\n for (t9=8*t5;t9<=min(_PB_NK-1,8*t5+7);t9++) {\n tmp[t6][t7] += alpha * A[t6][t9] * B[t9][t7];;\n }\n }\n }\n }\n }\n if (_PB_NL >= 1) {\n for 
(t5=0;t5<=floord(_PB_NL-1,8);t5++) {\n for (t6=8*t2;t6<=min(_PB_NI-1,8*t2+7);t6++) {\n for (t7=8*t3;t7<=min(_PB_NJ-1,8*t3+7);t7++) {\n lbv=8*t5;\n ubv=min(_PB_NL-1,8*t5+7);\n#pragma ivdep\n#pragma vector always\n for (t9=lbv;t9<=ubv;t9++) {\n D[t6][t9] += tmp[t6][t7] * C[t7][t9];;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/trmm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "e */\nif ((_PB_M >= 1) && (_PB_N >= 1)) {\n if (_PB_M >= 2) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_M-2,32);t3++) {\n for (t4=t3;t4<=floord(_PB_M-1,32);t4++) {\n for (t5=32*t3;t5<=min(min(_PB_M-2,32*t3+31),32*t4+30);t5++) {\n for (t6=max(32*t4,t5+1);t6<=min(_PB_M-1,32*t4+31);t6++) {\n lbv=32*t2;\n ubv=min(_PB_N-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t7=lbv;t7<=ubv;t7++) {\n B[t5][t7] += A[t6][t5] * B[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/trmm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_M-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_M-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_N-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n B[t4][t5] = alpha * B[t4][t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gemm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "/* Start of CLooG code */\nif ((_PB_NI >= 1) && (_PB_NJ >= 1)) {\n lbp=0;\n ubp=floord(_PB_NI-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gemm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": " *= beta;;\n }\n }\n }\n }\n if (_PB_NK >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,32);t3++) {\n for (t4=0;t4<=floord(_PB_NK-1,32);t4++) {\n for (t5=32*t2;t5<=min(_PB_NI-1,32*t2+31);t5++) {\n for (t6=32*t4;t6<=min(_PB_NK-1,32*t4+31);t6++) {\n lbv=32*t3;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t7=lbv;t7<=ubv;t7++) {\n C[t5][t7] += alpha * A[t5][t6] * B[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/mvt.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t2,t3,t4)", "context_chars": 100, 
"text": "gister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t1=lbp;t1<=ubp;t1++) {\n for (t2=0;t2<=floord(_PB_N-1,32);t2++) {\n for (t3=32*t1;t3<=min(_PB_N-1,32*t1+31);t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n x1[t3] = x1[t3] + A[t3][t4] * y_1[t4];;\n x2[t3] = x2[t3] + A[t4][t3] * y_2[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t2,t3,t4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "bp, ubp, lb2, ub2;\n register int lbv, ubv;\n/* Start of CLooG code */\nlbp=0;\nubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=32*t2;\n ubv=min(_PB_N-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n q[t3] = SCALAR_VAL(0.0);;\n }\n} #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "bv;t3++) {\n q[t3] = SCALAR_VAL(0.0);;\n }\n}\nif (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_M-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n for (t5=32*t3;t5<=min(_PB_M-1,32*t3+31);t5++) {\n q[t4] = q[t4] + A[t4][t5] * p[t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": " q[t4] = q[t4] + A[t4][t5] * p[t5];;\n }\n }\n }\n }\n}\nlbp=0;\nubp=floord(_PB_M-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=32*t2;\n ubv=min(_PB_M-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n s[t3] = 0;;\n }\n} #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/bicg.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": " (t3=lbv;t3<=ubv;t3++) {\n s[t3] = 0;;\n }\n}\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_M-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=32*t3;t4<=min(_PB_N-1,32*t3+31);t4++) {\n lbv=32*t2;\n ubv=min(_PB_M-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n s[t5] = s[t5] + r[t4] * A[t4][t5];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/syr2k.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "gister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_N-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/syr2k.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "5] *= beta;;\n }\n }\n }\n }\n if (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=0;t4<=floord(_PB_M-1,32);t4++) {\n for (t5=32*t2;t5<=min(_PB_N-1,32*t2+31);t5++) {\n for (t6=32*t3;t6<=min(_PB_N-1,32*t3+31);t6++) {\n for (t7=32*t4;t7<=min(_PB_M-1,32*t4+31);t7++) {\n C[t5][t6] += A[t6][t7] * alpha * B[t5][t7] + B[t6][t7] * alpha * A[t5][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "gister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=32*t2;\n ubv=min(_PB_N-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n y[t3] = SCALAR_VAL(0.0);;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "t3=lbv;t3<=ubv;t3++) {\n y[t3] = SCALAR_VAL(0.0);;\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n for (t5=32*t3;t5<=min(_PB_N-1,32*t3+31);t5++) {\n y[t4] = B[t4][t5] * x[t5] + y[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": " y[t4] = B[t4][t5] * x[t5] + y[t4];;\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=32*t2;\n ubv=min(_PB_N-1,32*t2+31);\n#pragma ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n tmp[t3] = SCALAR_VAL(0.0);;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "=lbv;t3<=ubv;t3++) {\n tmp[t3] = SCALAR_VAL(0.0);;\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_N-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n for (t5=32*t3;t5<=min(_PB_N-1,32*t3+31);t5++) {\n tmp[t4] = A[t4][t5] * x[t5] + tmp[t4];;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/gesummv.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5)", "context_chars": 100, "text": "[t4] = A[t4][t5] * x[t5] + tmp[t4];;\n }\n }\n }\n }\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n lbv=32*t2;\n ubv=min(_PB_N-1,32*t2+31);\n#pragma 
ivdep\n#pragma vector always\n for (t3=lbv;t3<=ubv;t3++) {\n y[t3] = alpha * tmp[t3] + beta * y[t3];;\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/syrk.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "gister int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_N >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=t2;t3++) {\n for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(t4,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n C[t4][t5] *= beta;;\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/syrk.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)", "context_chars": 100, "text": "5] *= beta;;\n }\n }\n }\n }\n if (_PB_M >= 1) {\n lbp=0;\n ubp=floord(_PB_N-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=t2;t3++) {\n for (t4=0;t4<=floord(_PB_M-1,32);t4++) {\n for (t5=32*t2;t5<=min(_PB_N-1,32*t2+31);t5++) {\n for (t6=32*t3;t6<=min(t5,32*t3+31);t6++) {\n for (t7=32*t4;t7<=min(_PB_M-1,32*t4+31);t7++) {\n C[t5][t6] += alpha * A[t5][t7] * A[t6][t7];;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)", "context_chars": 100, "text": "ster int lbv, ubv;\n/* Start of CLooG code */\nif (_PB_NI >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n if ((_PB_NJ >= 0) && (_PB_NL >= 0)) {\n for (t3=0;t3<=floord(_PB_NJ+_PB_NL-1,32);t3++) {\n if ((_PB_NJ >= _PB_NL+1) && (t3 <= floord(_PB_NL-1,32)) && (t3 >= ceild(_PB_NL-31,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=_PB_NL-1;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n lbv=_PB_NL;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ >= _PB_NL+1) && (t3 <= floord(_PB_NL-32,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=32*t3+31;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ <= _PB_NL-1) && (t3 <= floord(_PB_NJ-1,32)) && (t3 >= ceild(_PB_NJ-31,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=_PB_NJ-1;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n lbv=_PB_NJ;\n ubv=min(_PB_NL-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n if ((_PB_NJ <= _PB_NL-1) && (t3 <= floord(_PB_NJ-32,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=32*t3+31;\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((_PB_NJ == _PB_NL) && (t3 <= floord(_PB_NJ-1,32))) {\n for 
(t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((t3 <= floord(_PB_NJ-1,32)) && (t3 >= ceild(_PB_NL,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n if ((t3 <= floord(_PB_NL-1,32)) && (t3 >= ceild(_PB_NJ,32))) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NL-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n }\n }\n if (_PB_NL <= -1) {\n for (t3=0;t3<=floord(_PB_NJ-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NJ-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n tmp[t4][t5] = SCALAR_VAL(0.0);;\n }\n }\n }\n }\n if (_PB_NJ <= -1) {\n for (t3=0;t3<=floord(_PB_NL-1,32);t3++) {\n for (t4=32*t2;t4<=min(_PB_NI-1,32*t2+31);t4++) {\n lbv=32*t3;\n ubv=min(_PB_NL-1,32*t3+31);\n#pragma ivdep\n#pragma vector always\n for (t5=lbv;t5<=ubv;t5++) {\n D[t4][t5] *= beta;;\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/sfs-tiling/pluto/32/2mm.c", "omp_pragma_line": "#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (_PB_NJ >= 1) {\n lbp=0;\n ubp=floord(_PB_NI-1,32);\nfor (t2=lbp;t2<=ubp;t2++) {\n for (t3=0;t3<=floord(_PB_NJ-1,32);t3++) {\n if (_PB_NK >= 1) {\n for (t5=0;t5<=floord(_PB_NK-1,32);t5++) {\n for (t6=32*t2;t6<=min(_PB_NI-1,32*t2+31);t6++) {\n for (t7=32*t3;t7<=min(_PB_NJ-1,32*t3+31);t7++) {\n for (t9=32*t5;t9<=min(_PB_NK-1,32*t5+31);t9++) {\n tmp[t6][t7] += alpha * A[t6][t9] * B[t9][t7];;\n }\n }\n }\n }\n }\n if (_PB_NL >= 1) {\n for (t5=0;t5<=floord(_PB_NL-1,32);t5++) {\n for (t6=32*t2;t6<=min(_PB_NI-1,32*t2+31);t6++) {\n for (t7=32*t3;t7<=min(_PB_NJ-1,32*t3+31);t7++) {\n lbv=32*t5;\n ubv=min(_PB_NL-1,32*t5+31);\n#pragma ivdep\n#pragma vector always\n for (t9=lbv;t9<=ubv;t9++) {\n D[t6][t9] += tmp[t6][t7] * C[t7][t9];;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8,t9)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/parameterized/trisolv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") < (y) ? (x) : (y))\n#define floord(n,d) (((n)<0) ? 
-((-(n)+(d)-1)/(d)) : (n)/(d))\n#pragma scop\n{\n for (int ii0 = 0; ii0 <= floord(_PB_N - 1, 64); ii0 += 1)\n for (int c4 = 64 * ii0; c4 <= min(_PB_N - 1, 64 * ii0 + 63); c4 += 1)\n x[c4] = b[c4];\n for (int k = 1; k <= (_PB_N + 30) / 32; k += 1)\n #pragma omp parallel for\n for (int ii0 = k / 2; ii0 <= min(k - 1, (_PB_N - 1) / 64); ii0 += 1) {\n if (k >= 2) {\n for (int c4 = 64 * ii0; c4 <= min(_PB_N - 1, 64 * ii0 + 63); c4 += 1) {\n for (int c6 = 64 * k - 64 * ii0 - 64; c6 < min(64 * k - 64 * ii0, c4); c6 += 1)\n x[c4] -= (L[c4][c6] * x[c6]);\n if (2 * ii0 + 1 == k)\n x[c4] = (x[c4] / L[c4][c4]);\n }\n } else\n for (int c4 = 0; c4 <= min(63, _PB_N - 1); c4 += 1) {\n for (int c6 = 0; c6 < c4; c6 += 1)\n x[c4] -= (L[c4][c6] * x[c6]);\n x[c4] = (x[c4] / L[c4][c4]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/parameterized/trisolv.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "64 * ii0 + 63); c4 += 1)\n x[c4] = b[c4];\n for (int k = 1; k <= (_PB_N + 30) / 32; k += 1)\n for (int ii0 = k / 2; ii0 <= min(k - 1, (_PB_N - 1) / 64); ii0 += 1) {\n if (k >= 2) {\n for (int c4 = 64 * ii0; c4 <= min(_PB_N - 1, 64 * ii0 + 63); c4 += 1) {\n for (int c6 = 64 * k - 64 * ii0 - 64; c6 < min(64 * k - 64 * ii0, c4); c6 += 1)\n x[c4] -= (L[c4][c6] * x[c6]);\n if (2 * ii0 + 1 == k)\n x[c4] = (x[c4] / L[c4][c4]);\n }\n } else\n for (int c4 = 0; c4 <= min(63, _PB_N - 1); c4 += 1) {\n for (int c6 = 0; c6 < c4; c6 += 1)\n x[c4] -= (L[c4][c6] * x[c6]);\n x[c4] = (x[c4] / L[c4][c4]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/parameterized/cholesky.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "))\n#pragma scop\n{\n if (_PB_N >= 65)\n for (int k = 0; k <= (3 * _PB_N - 6) / 64; k += 1) {\n for (int ii0 = max(k - (_PB_N + 29) / 32 + 1, (k + 1) / 3); ii0 <= min(k, (_PB_N - 1) / 64); ii0 += 1) {\n if (k >= 1) {\n for (int ii2 = -ii0 + (k + ii0 + 1) / 2; ii2 <= min(min(ii0, k - ii0), (_PB_N - 2) / 64); ii2 += 1) {\n if (3 * ii0 == k && 3 * ii2 == k) {\n A[64 * k / 3][64 * k / 3] = SQRT_FUN(A[64 * k / 3][64 * k / 3]);\n if (64 * k + 195 >= 3 * _PB_N) {\n A[(64 * k / 3) + 1][64 * k / 3] /= A[64 * k / 3][64 * k / 3];\n A[(64 * k / 3) + 1][(64 * k / 3) + 1] -= (A[(64 * k / 3) + 1][64 * k / 3] * A[(64 * k / 3) + 1][64 * k / 3]);\n A[(64 * k / 3) + 1][(64 * k / 3) + 1] = SQRT_FUN(A[(64 * k / 3) + 1][(64 * k / 3) + 1]);\n } else {\n A[(64 * k / 3) + 1][64 * k / 3] /= A[64 * k / 3][64 * k / 3];\n A[(64 * k / 3) + 1][(64 * k / 3) + 1] -= (A[(64 * k / 3) + 1][64 * k / 3] * A[(64 * k / 3) + 1][64 * k / 3]);\n A[(64 * k / 3) + 1][(64 * k / 3) + 1] = SQRT_FUN(A[(64 * k / 3) + 1][(64 * k / 3) + 1]);\n }\n }\n for (int c6 = max(max(64 * ii0, 32 * k - 32 * ii0 + 2), 64 * ii2 + 1); c6 <= min(_PB_N - 1, 64 * ii0 + 63); c6 += 1) {\n for (int c8 = 64 * ii2; c8 <= min(64 * ii2 + 63, c6 - 1); c8 += 1) {\n for (int c10 = 64 * k - 64 * ii0 - 64 * ii2; c10 <= min(64 * k - 64 * ii0 - 64 * ii2 + 63, c8 - 1); c10 += 1)\n A[c6][c8] -= (A[c6][c10] * A[c8][c10]);\n if (ii0 + 2 * ii2 == k)\n A[c6][c8] /= A[c8][c8];\n }\n if (_PB_N >= 64 * ii0 + 2 && ii0 + 2 * ii2 == k) {\n for (int c8 = 32 * k - 32 * ii0; c8 <= min(32 * k - 32 * ii0 + 63, c6 - 1); c8 += 1)\n A[c6][c6] -= (A[c6][c8] * A[c6][c8]);\n if (3 * ii0 == k)\n A[c6][c6] = SQRT_FUN(A[c6][c6]);\n } else 
if (3 * _PB_N >= 64 * k + 259 && 64 * ii0 + 1 == _PB_N && _PB_N + 128 * ii2 == 64 * k + 1 && c6 + 1 == _PB_N)\n for (int c8 = ((-_PB_N + 1) / 2) + 32 * k; c8 <= ((-_PB_N + 127) / 2) + 32 * k; c8 += 1)\n A[_PB_N - 1][_PB_N - 1] -= (A[_PB_N - 1][c8] * A[_PB_N - 1][c8]);\n }\n }\n if (64 * k + 67 == 3 * _PB_N && 64 * ii0 + 1 == _PB_N)\n for (int c8 = _PB_N - 65; c8 < _PB_N - 1; c8 += 1)\n A[_PB_N - 1][_PB_N - 1] -= (A[_PB_N - 1][c8] * A[_PB_N - 1][c8]);\n } else\n for (int c6 = 0; c6 <= 63; c6 += 1) {\n for (int c8 = 0; c8 < c6; c8 += 1) {\n for (int c10 = 0; c10 < c8; c10 += 1)\n A[c6][c8] -= (A[c6][c10] * A[c8][c10]);\n A[c6][c8] /= A[c8][c8];\n }\n for (int c8 = 0; c8 < c6; c8 += 1)\n A[c6][c6] -= (A[c6][c8] * A[c6][c8]);\n A[c6][c6] = SQRT_FUN(A[c6][c6]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/unparameterized/symm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (y))\n#define max(x,y) ((x) > (y) ? (x) : (y))\n#pragma scop\nfor (int k = 0; k <= 32; k += 1) {\n for (int ii1 = 0; ii1 <= 40; ii1 += 1)\n for (int ii3 = 0; ii3 < k; ii3 += 1) {\n if (k <= 31) {\n for (int c6 = max(64 * k - 64, 64 * ii3 + 1); c6 < 64 * k; c6 += 1)\n for (int c7 = 64 * ii1; c7 <= min(2599, 64 * ii1 + 63); c7 += 1)\n for (int c9 = 64 * ii3; c9 <= min(64 * ii3 + 63, c6 - 1); c9 += 1)\n C[c9][c7] += ((alpha * B[c6][c7]) * A[c6][c9]);\n } else\n for (int c6 = max(1984, 64 * ii3 + 1); c6 <= 1999; c6 += 1) {\n if (ii1 <= 39) {\n for (int c7 = 64 * ii1; c7 <= 64 * ii1 + 63; c7 += 1)\n for (int c9 = 64 * ii3; c9 <= min(64 * ii3 + 63, c6 - 1); c9 += 1)\n C[c9][c7] += ((alpha * B[c6][c7]) * A[c6][c9]);\n } else\n for (int c7 = 2560; c7 <= 2599; c7 += 1)\n for (int c9 = 64 * ii3; c9 <= min(64 * ii3 + 63, c6 - 1); c9 += 1)\n C[c9][c7] += ((alpha * B[c6][c7]) * A[c6][c9]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/unparameterized/gramschmidt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ne min(x,y) ((x) < (y) ? (x) : (y))\n#define max(x,y) ((x) > (y) ? 
(x) : (y))\n#pragma scop\n{\n for (int ii0 = 0; ii0 <= 40; ii0 += 1)\n for (int ii2 = ii0; ii2 <= 40; ii2 += 1)\n for (int c6 = 64 * ii0; c6 <= min(min(2598, 64 * ii0 + 63), 64 * ii2 + 62); c6 += 1) {\n if (ii2 <= 39) {\n for (int c8 = max(64 * ii2, c6 + 1); c8 <= 64 * ii2 + 63; c8 += 1)\n R[c6][c8] = SCALAR_VAL(0.0);\n } else\n for (int c8 = max(2560, c6 + 1); c8 <= 2599; c8 += 1)\n R[c6][c8] = SCALAR_VAL(0.0);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/unparameterized/gramschmidt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "c6][c8] = SCALAR_VAL(0.0);\n }\n for (int k = 1; k <= 81; k += 1) {\n if (k % 2 == 0) {\n for (int ii2 = k / 2; ii2 <= 40; ii2 += 1)\n for (int c6 = 32 * k - 64; c6 < 32 * k; c6 += 1) {\n if (ii2 <= 39) {\n for (int c8 = 64 * ii2; c8 <= 64 * ii2 + 63; c8 += 1) {\n for (int c10 = 0; c10 <= 1999; c10 += 1)\n R[c6][c8] += (Q[c10][c6] * A[c10][c8]);\n for (int c10 = 0; c10 <= 1999; c10 += 1)\n A[c10][c8] = (A[c10][c8] - (Q[c10][c6] * R[c6][c8]));\n }\n } else\n for (int c8 = 2560; c8 <= 2599; c8 += 1) {\n for (int c10 = 0; c10 <= 1999; c10 += 1)\n R[c6][c8] += (Q[c10][c6] * A[c10][c8]);\n for (int c10 = 0; c10 <= 1999; c10 += 1)\n A[c10][c8] = (A[c10][c8] - (Q[c10][c6] * R[c6][c8]));\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/results/merge-tiling/unparameterized/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(int c9 = 3904; c9 <= 3967; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n } else\n for (int ii0 = max(0, (k + 1) / 2 - 31); ii0 <= min(62, k); ii0 += 1) {\n if (k >= 1) {\n if (3 * ii0 >= k) {\n for (int ii2 = -ii0 + (k + ii0 + 1) / 2; ii2 <= min(min(61, ii0), k - ii0); ii2 += 1)\n for (int c6 = 64 * ii0 + 1; c6 <= min(3999, 64 * ii0 + 64); c6 += 1) {\n if (ii0 == 62 && 2 * ii2 + 62 == k) {\n A[c6][32 * k - 1984] /= A[32 * k - 1984][32 * k - 1984];\n } else if (ii0 + 2 * ii2 == k)\n A[c6][32 * k - 32 * ii0] /= A[32 * k - 32 * ii0][32 * k - 32 * ii0];\n for (int c8 = max(64 * ii2, 64 * k - 64 * ii0 - 64 * ii2 + 1); c8 <= min(64 * ii2 + 63, c6 - 1); c8 += 1) {\n for (int c10 = 64 * k - 64 * ii0 - 64 * ii2; c10 <= min(64 * k - 64 * ii0 - 64 * ii2 + 63, c8 - 1); c10 += 1)\n A[c6][c8] -= (A[c6][c10] * A[c10][c8]);\n if (ii0 <= 61 && ii0 + 2 * ii2 == k) {\n A[c6][c8] /= A[c8][c8];\n } else if (ii0 == 62 && 2 * ii2 + 62 == k)\n A[c6][c8] /= A[c8][c8];\n }\n if (3 * ii0 == k && 3 * ii2 == k)\n for (int c8 = c6; c8 <= (64 * k / 3) + 63; c8 += 1)\n for (int c9 = 64 * k / 3; c9 < c6; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n }\n if (k >= 124 && ii0 == 62)\n for (int c6 = 3969; c6 <= 3999; c6 += 1)\n for (int c8 = 3968; c8 < c6; c8 += 1)\n for (int c10 = 64 * k - 7936; c10 < 64 * k - 7872; c10 += 1)\n A[c6][c8] -= (A[c6][c10] * A[c10][c8]);\n if (k >= 124 && ii0 == 62)\n for (int c6 = 3969; c6 <= 3999; c6 += 1)\n for (int c8 = c6; c8 <= 3999; c8 += 1)\n for (int c9 = 64 * k - 7936; c9 < 64 * k - 7872; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n }\n if (ii0 <= 61)\n for (int ii2 = max(max(ii0, k - 2 * ii0), -ii0 + (k + ii0) / 2 + 1); ii2 <= min(62, k - ii0); ii2 += 1) {\n if (2 * ii0 + ii2 >= k + 1) {\n for (int c6 = 64 * ii0 + 1; c6 <= min(64 * ii0 + 64, 64 * ii2 + 63); c6 += 1)\n for (int c8 = max(64 * ii2, c6); c8 <= min(3999, 64 * ii2 + 63); c8 += 1)\n for (int c9 = 64 * k - 64 * 
ii0 - 64 * ii2; c9 <= 64 * k - 64 * ii0 - 64 * ii2 + 63; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n } else\n for (int c6 = 64 * ii0 + 1; c6 <= 64 * ii0 + 64; c6 += 1)\n for (int c8 = 64 * k - 128 * ii0; c8 <= min(3999, 64 * k - 128 * ii0 + 63); c8 += 1)\n for (int c9 = 64 * ii0; c9 < c6; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n }\n } else\n for (int c6 = 1; c6 <= 64; c6 += 1) {\n for (int c8 = 0; c8 < c6; c8 += 1) {\n for (int c10 = 0; c10 < c8; c10 += 1)\n A[c6][c8] -= (A[c6][c10] * A[c10][c8]);\n A[c6][c8] /= A[c8][c8];\n }\n for (int c8 = c6; c8 <= 63; c8 += 1)\n for (int c9 = 0; c9 < c6; c9 += 1)\n A[c6][c8] -= (A[c6][c9] * A[c9][c8]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/src/for_decorator.cpp", "omp_pragma_line": "#pragma omp parallel for\");", "context_chars": 100, "text": " printer = isl_printer_start_line(printer);\n printer = isl_printer_print_str(printer, \"printer = isl_printer_end_line(printer);\n }\n\n if (!inline_variables)\n {\n isl_ast_expr_free(init);\n init = isl_ast_expr_from_id(isl_id_alloc(ctx, lb_variable, NULL));\n \n cond = isl_ast_expr_set_op_arg(cond, 1, isl_ast_expr_from_id(isl_id_alloc(ctx, ub_variable, NULL)));\n } #pragma omp parallel for\");"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/piotr-skotnicki/tc-optimizer/src/for_decorator.cpp", "omp_pragma_line": "#pragma omp parallel for\");", "context_chars": 100, "text": " printer = isl_printer_start_line(printer);\n printer = isl_printer_print_str(printer, \"printer = isl_printer_end_line(printer);\n }\n\n if (!inline_variables)\n {\n isl_ast_expr_free(init);\n init = isl_ast_expr_from_id(isl_id_alloc(ctx, lb_variable, NULL));\n \n cond = isl_ast_expr_set_op_arg(cond, 1, isl_ast_expr_from_id(isl_id_alloc(ctx, ub_variable, NULL)));\n } #pragma omp parallel for\");"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/known_hosts_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA_CTX ctx;\n\t\tmemcpy(&ctx, &cur_salt->ipad_ctx, sizeof(ctx));\n\t\tSHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA1_Final((unsigned char*) crypt_out[index], &ctx);\n\n\t\tmemcpy(&ctx, &cur_salt->opad_ctx, sizeof(ctx));\n\t\tSHA1_Update(&ctx, crypt_out[index], BINARY_SIZE);\n\t\tSHA1_Final((unsigned char*) crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/keepass_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char final_key[32];\n\t\tunsigned char *decrypted_content;\n\t\tSHA256_CTX ctx;\n\t\tunsigned char iv[16];\n\t\tunsigned char out[32];\n\t\tint pad_byte;\n\t\tint datasize;\n\t\tAES_KEY akey;\n\t\tTwofish_key tkey;\n\t\tstruct chacha_ctx ckey;\n\n\t\t// derive and set decryption key\n\t\ttransform_key(keepass_key[index], cur_salt, final_key);\n\t\tif (cur_salt->algorithm == 0) {\n\t\t\t/* AES decrypt cur_salt->contents with final_key */\n\t\t\tmemcpy(iv, cur_salt->enc_iv, 16);\n\t\t\tAES_set_decrypt_key(final_key, 256, 
&akey);\n\t\t} else if (cur_salt->algorithm == 1) {\n\t\t\tmemcpy(iv, cur_salt->enc_iv, 16);\n\t\t\tmemset(&tkey, 0, sizeof(Twofish_key));\n\t\t\tTwofish_prepare_key(final_key, 32, &tkey);\n\t\t} else if (cur_salt->algorithm == 2) { // ChaCha20\n\t\t\tmemcpy(iv, cur_salt->enc_iv, 16);\n\t\t\tchacha_keysetup(&ckey, final_key, 256);\n\t\t\tchacha_ivsetup(&ckey, iv, NULL, 12);\n\t\t}\n\n\t\tif (cur_salt->version == 1 && cur_salt->algorithm == 0) {\n\t\t\tdecrypted_content = mem_alloc(cur_salt->contentsize);\n\t\t\tAES_cbc_encrypt(cur_salt->contents, decrypted_content,\n\t\t\t cur_salt->contentsize, &akey, iv, AES_DECRYPT);\n\t\t\tpad_byte = decrypted_content[cur_salt->contentsize - 1];\n\t\t\tdatasize = cur_salt->contentsize - pad_byte;\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, decrypted_content, datasize);\n\t\t\tSHA256_Final(out, &ctx);\n\t\t\tMEM_FREE(decrypted_content);\n\t\t\tif (!memcmp(out, cur_salt->contents_hash, 32)) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t\telse if (cur_salt->version == 2 && cur_salt->algorithm == 0) {\n\t\t\tunsigned char dec_buf[32];\n\n\t\t\tAES_cbc_encrypt(cur_salt->contents, dec_buf, 32,\n\t\t\t &akey, iv, AES_DECRYPT);\n\t\t\tif (!memcmp(dec_buf, cur_salt->expected_bytes, 32)) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t\telse if (cur_salt->version == 2 && cur_salt->algorithm == 2) {\n\t\t\tunsigned char dec_buf[32];\n\n\t\t\tchacha_decrypt_bytes(&ckey, cur_salt->contents, dec_buf, 32, 20);\n\t\t\tif (!memcmp(dec_buf, cur_salt->expected_bytes, 32)) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\n\t\t}\n\t\telse if (cur_salt->version == 1 && cur_salt->algorithm == 1) { /* KeePass 1.x with Twofish */\n\t\t\tint crypto_size;\n\n\t\t\tdecrypted_content = mem_alloc(cur_salt->contentsize);\n\t\t\tcrypto_size = Twofish_Decrypt(&tkey, cur_salt->contents,\n\t\t\t decrypted_content,\n\t\t\t cur_salt->contentsize, iv);\n\t\t\tdatasize = crypto_size; // awesome, right?\n\t\t\tif (datasize <= cur_salt->contentsize && datasize > 0) {\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, decrypted_content, datasize);\n\t\t\t\tSHA256_Final(out, &ctx);\n\t\t\t\tif (!memcmp(out, cur_salt->contents_hash, 32)) {\n\t\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\tMEM_FREE(decrypted_content);\n\t\t} else {\n\t\t\t// KeePass version 2 with Twofish is TODO. Twofish support under KeePass version 2\n\t\t\t// requires a third-party plugin. 
See http://keepass.info/plugins.html for details.\n\t\t\terror_msg(\"KeePass v2 w/ Twofish not supported yet\");\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/django_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT], i;\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\tunion {\n\t\t\tuint32_t *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tx.pout[i] = crypt_out[i+index];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt.c, strlen((char*)cur_salt->salt.c), cur_salt->iterations, &(x.poutc), 32, 0);\n#else\n\t\tpbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),\n\t\t\tcur_salt->salt.c, strlen((char*)cur_salt->salt.c),\n\t\t\tcur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/zipmonster_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char buffer[BINARY_SIZE];\n\t\tMD5_CTX ctx;\n\t\tint n = 49999;\n#ifdef SIMD_COEF_32\n\t\tint j, k;\n\t\tuint32_t *p, t;\n\t\tuint8_t ib[64 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];\n\t\tuint8_t ob[16 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];\n\t\tuint8_t *md5 = mem_align(ib, MEM_ALIGN_SIMD);\n\t\tuint32_t *crypt_buf = mem_align(ob, MEM_ALIGN_SIMD);\n\n\t\tmemset(md5, 0, 64 * SIMD_COEF_32 * SIMD_PARA_MD5);\n\n\t\tfor (j = 0; j < SIMD_COEF_32*SIMD_PARA_MD5; ++j) {\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\tuint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];\n#else\n\t\t\tuint16_t *op = (uint16_t*)&md5[GETPOS(3, j)];\n\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, saved_key[index+j], strlen(saved_key[index+j]));\n\t\t\tMD5_Final(buffer, &ctx);\n\n\t\t\tfor (k = 0; k < 16; ++k) {\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\top[0] = itoa16u_w[buffer[k++]];\n\t\t\t\top[1] = itoa16u_w[buffer[k]];\n#else\n\t\t\t\top[1] = itoa16u_w[buffer[k++]];\n\t\t\t\top[0] = itoa16u_w[buffer[k]];\n\n\t\t\t\top += ((SIMD_COEF_32) << 1);\n\t\t\t}\n\t\t\tmd5[GETPOS(32,j)] = 0x80;\n\t\t\tmd5[GETPOS(57,j)] = 1;\n\t\t}\n#else\n\t\tunsigned char hex_buffer[BINARY_SIZE * 2];\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Final(buffer, &ctx);\n\t\thex_encode_uppercase(buffer, hex_buffer);\n\n\n\t\tdo {\n#ifdef SIMD_COEF_32\n\t\t\tSIMDmd5body(md5, crypt_buf, NULL, SSEi_MIXED_IN);\n\t\t\t// upper case hex encode into the next input buffer.\n\t\t\tfor (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; ++j) {\n\t\t\t\tint i;\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\tuint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];\n#else\n\t\t\t\tuint16_t *op = (uint16_t*)&md5[GETPOS(3, j)];\n\n\t\t\t\tp = &crypt_buf[(j&(SIMD_COEF_32-1))+(4*SIMD_COEF_32*(j/SIMD_COEF_32))];\n\t\t\t\tfor (i = 0; i < 4; ++i) {\n\t\t\t\t\tt = *p;\n\t\t\t\t\tp += SIMD_COEF_32;\n#if 
ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\top[0] = itoa16u_w[t&0xFF];\n\t\t\t\t\top[1] = itoa16u_w[(t>>8)&0xFF];\n\t\t\t\t\tt >>= 16;\n\t\t\t\t\top += ((SIMD_COEF_32) << 1);\n\t\t\t\t\top[0] = itoa16u_w[t&0xFF];\n\t\t\t\t\top[1] = itoa16u_w[(t>>8)&0xFF];\n#else\n\t\t\t\t\top[1] = itoa16u_w[t&0xFF];\n\t\t\t\t\top[0] = itoa16u_w[(t>>8)&0xFF];\n\t\t\t\t\tt >>= 16;\n\t\t\t\t\top += ((SIMD_COEF_32) << 1);\n\t\t\t\t\top[1] = itoa16u_w[t&0xFF];\n\t\t\t\t\top[0] = itoa16u_w[(t>>8)&0xFF];\n\n\t\t\t\t\top += ((SIMD_COEF_32) << 1);\n\n\t\t\t\t}\n\t\t\t}\n#else\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, hex_buffer, BINARY_SIZE * 2);\n\t\t\tMD5_Final(buffer, &ctx);\n\t\t\thex_encode_uppercase(buffer, hex_buffer);\n\n\t\t\t--n;\n\t\t} while (n);\n#ifdef SIMD_COEF_32\n\t\tp = crypt_buf;\n\t\tfor (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; j+=SIMD_COEF_32) {\n\t\t\tfor (k = 0; k < SIMD_COEF_32*4; ++k) {\n\t\t\t\tuint32_t J = j+(k&(SIMD_COEF_32-1)), K = (k/SIMD_COEF_32);\n\t\t\t\tcrypt_out[index+J][K] = *p++;\n\t\t\t}\n\t\t}\n#else\n\t\tmemcpy((unsigned char*)crypt_out[index], buffer, BINARY_SIZE);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/7z_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = index;\n\t\t\t}\n\t\t\twhile (tot_todo % NBKEYS)\n\t\t\t\tindices[tot_todo++] = count;\n\t\t}\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += NBKEYS) {\n\t\tint j;\n\n\t\tif (new_keys)\n\t\t\tsevenzip_kdf(index/NBKEYS, indices + index, master[index]);\n\n\t\t/* do decryption and checks */\n\t\tfor (j = 0; j < NBKEYS; ++j) {\n\t\t\tcracked[indices[index + j]] = sevenzip_decrypt(master[index + j]);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/7z_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "{\n\t\t\tcracked[indices[index + j]] = sevenzip_decrypt(master[index + j]);\n\t\t}\n\t}\n#else\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t/* derive key */\n\t\tif (new_keys)\n\t\t\tsevenzip_kdf(index, master[index]);\n\n\t\t/* do decryption and checks */\n\t\tcracked[index] = sevenzip_decrypt(master[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/truecrypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#define inner_batch_size 1\n#endif\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * count);\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i += inner_batch_size) {\n\t\tunsigned char keys[INNER_BATCH_MAX_SZ][64];\n\t\tint lens[INNER_BATCH_MAX_SZ];\n\t\tint j;\n\n\t\tfor (j = 0; j < inner_batch_size; ++j) {\n\t\t\tlens[j] = strlen((char *)key_buffer[i+j]);\n\t\t\t/* zeroing of end by strncpy is important for keyfiles */\n\t\t\tstrncpy((char*)keys[j], (char*)key_buffer[i+j], 64);\n\t\t\t/* process keyfile(s) */\n\t\t\tif (psalt->nkeyfiles) {\n\t\t\t\tint t;\n\t\t\t\t/* Apply keyfile pool to passphrase */\n\t\t\t\tfor (t = 0; t < KPOOL_SZ; t++)\n\t\t\t\t\tkeys[j][t] += psalt->kpool[t];\n\t\t\t\tlens[j] = 64;\n\t\t\t}\n\t\t}\n\n\t\tif (psalt->hash_type == IS_SHA512) {\n#if SSE_GROUP_SZ_SHA512\n\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA512];\n\t\t\tunsigned char *pout[SSE_GROUP_SZ_SHA512];\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {\n\t\t\t\tpin[j] = keys[j];\n\t\t\t\tpout[j] = keys[j];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned 
char **)pin, lens, psalt->salt, 64, psalt->num_iterations, pout, sizeof(keys[0]), 0);\n#else\n\t\t\tpbkdf2_sha512((const unsigned char*)keys[0], lens[0], psalt->salt, 64, psalt->num_iterations, keys[0], sizeof(keys[0]), 0);\n\n\t\t}\n\t\telse if (psalt->hash_type == IS_RIPEMD160 || psalt->hash_type == IS_RIPEMD160BOOT)\n\t\t\tpbkdf2_ripemd160((const unsigned char*)keys[0], lens[0], psalt->salt, 64, psalt->num_iterations, keys[0], sizeof(keys[0]), 0);\n\t\telse\n\t\t\tpbkdf2_whirlpool((const unsigned char*)keys[0], lens[0], psalt->salt, 64, psalt->num_iterations, keys[0], sizeof(keys[0]), 0);\n\n\t\tfor (j = 0; j < inner_batch_size; ++j) {\n\t\t\tcracked[i+j] = 0;\n\t\t\tif (decrypt_and_verify(keys[j], 0) // AES\n\t\t\t || decrypt_and_verify(keys[j], 1) // Twofish\n\t\t\t || decrypt_and_verify(keys[j], 2)) // Serpent\n\t\t\t\tcracked[i+j] = 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/encdatavault_pbkdf2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n\tnb_keys = 1 << (cur_salt->algo_id - 1);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_PBKDF2_CRYPT) {\n\t\tint i, j, key_len;\n\t\tbuffer_128 kdf_out[MIN_KEYS_PER_PBKDF2_CRYPT][ENC_MAX_KEY_NUM];\n\t\tbuffer_128 tmp;\n\t\tbuffer_128 ivs[ENC_MAX_KEY_NUM];\n\t\tunsigned char result[ENC_KEY_SIZE * ENC_MAX_KEY_NUM] = { 0 };\n\n\t\t// Key derviation based on PBKDF2-SHA256.\n\t\tunsigned char master[MIN_KEYS_PER_PBKDF2_CRYPT][ENC_KEY_SIZE * ENC_MAX_KEY_NUM];\n\t\tif (cur_salt->version == 1) {\n\t\t\tkey_len = nb_keys * ENC_KEY_SIZE;\n\t\t} else {\n\t\t\tkey_len = ENC_MAX_KEY_NUM * ENC_KEY_SIZE;\n\t\t}\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_PBKDF2_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_PBKDF2_CRYPT], *pout[MIN_KEYS_PER_PBKDF2_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_PBKDF2_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index + i]);\n\t\t\tpin[i] = (unsigned char *)saved_key[index + i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length,\n\t\t\t\t\t\t\tcur_salt->iterations, pout, key_len, 0);\n\t\tfor (i = 0; i < MIN_KEYS_PER_PBKDF2_CRYPT; ++i) {\n\t\t\tfor (j = 0; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\tmemcpy(kdf_out[i][j].u8, pout[i] + (j * ENC_KEY_SIZE), ENC_KEY_SIZE);\n\t\t\t}\n\t\t}\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_PBKDF2_CRYPT; ++i) {\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index + i], strlen(saved_key[index + i]), cur_salt->salt,\n\t\t\t\t\t\t\tcur_salt->salt_length, cur_salt->iterations, master[i], key_len, 0);\n\t\t\tfor (j = 0; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\tmemcpy(kdf_out[i][j].u8, master[i] + (j * ENC_KEY_SIZE), ENC_KEY_SIZE);\n\t\t\t}\n\t\t}\n\n\t\t/* AES iterated CTR */\n\t\tfor (i = 0; i < MIN_KEYS_PER_PBKDF2_CRYPT; ++i) {\n\t\t\tif (cur_salt->version == 1) {\n\t\t\t\tmemcpy(ivs[0].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\tfor (j = 1; j < nb_keys; j++) {\n\t\t\t\t\tmemcpy(ivs[j].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\t\tivs[j].u64[0] ^= kdf_out[i][j].u64[0];\n\t\t\t\t}\n\t\t\t\t// result buffer is used here to hold the decrypted data.\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->encrypted_data, result, kdf_out[i][0].u8, ivs, AES_BLOCK_SIZE,\n\t\t\t\t nb_keys, 1);\n\t\t\t\tif (!memcmp(result + 4, \"\\xd2\\xc3\\xb4\\xa1\\x00\\x00\", MIN(cur_salt->encrypted_data_length, ENC_SIG_SIZE - 2))) {\n\t\t\t\t\tcracked[index + 
i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Decrypt keychain\n\t\t\t\tivs[0].u64[0] = 0;\n\t\t\t\tfor (j = 1; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\t\tivs[j].u64[0] = kdf_out[i][ENC_MAX_KEY_NUM - j].u64[0];\n\t\t\t\t}\n\t\t\t\t// result buffer is used for the decrypted keys from the keychain\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->keychain, result, kdf_out[i][0].u8, ivs, ENC_KEYCHAIN_SIZE,\n\t\t\t\t ENC_MAX_KEY_NUM, 0);\n\n\t\t\t\t// Decrypt data\n\t\t\t\tmemcpy(ivs[0].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\tfor (j = 1; j < nb_keys; j++) {\n\t\t\t\t\tmemcpy(ivs[j].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\t\tmemcpy(tmp.u8, result + j * 16, ENC_NONCE_SIZE);\n\t\t\t\t\tivs[j].u64[0] ^= tmp.u64[0];\n\t\t\t\t}\n\t\t\t\t// result buffer is reused here to hold the decrypted data.\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->encrypted_data, result, result, ivs, AES_BLOCK_SIZE, nb_keys, 1);\n\t\t\t\tif (!memcmp(result + 4, \"\\xd2\\xc3\\xb4\\xa1\\x00\\x00\", MIN(cur_salt->encrypted_data_length, ENC_SIG_SIZE - 2))) {\n\t\t\t\t\tcracked[index + i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/odf_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char key[MIN_KEYS_PER_CRYPT][32];\n\t\tunsigned char hash[MIN_KEYS_PER_CRYPT][32];\n\t\tBF_KEY bf_key;\n\t\tint bf_ivec_pos, i;\n\t\tunsigned char ivec[8];\n\t\tunsigned char output[1024];\n\t\tSHA_CTX ctx;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tif (cur_salt->checksum_type == 0 && cur_salt->cipher_type == 0) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\t\tSHA1_Final((unsigned char *)(hash[i]), &ctx);\n\t\t\t}\n#ifdef SIMD_COEF_32\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = 20;\n\t\t\t\tpin[i] = hash[i];\n\t\t\t\tpout[i] = key[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,\n\t\t\t cur_salt->salt_length,\n\t\t\t cur_salt->iterations, pout,\n\t\t\t cur_salt->key_size, 0);\n#else\n\t\t\tpbkdf2_sha1(hash[0], 20, cur_salt->salt,\n\t\t\t cur_salt->salt_length,\n\t\t\t cur_salt->iterations, key[0],\n\t\t\t cur_salt->key_size, 0);\n\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tunsigned int crypt[5];\n\t\t\t\tbf_ivec_pos = 0;\n\t\t\t\tmemcpy(ivec, cur_salt->iv, 8);\n\t\t\t\tBF_set_key(&bf_key, cur_salt->key_size, key[i]);\n\t\t\t\tBF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, BF_DECRYPT);\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, output, cur_salt->original_length);\n\t\t\t\tSHA1_Final((unsigned char*)crypt, &ctx);\n\t\t\t\tcrypt_out[index+i][0] = crypt[0];\n\t\t\t\tif (cur_salt->original_length % 64 >= 52 && cur_salt->original_length % 64 <= 55)\n\t\t\t\t\tSHA1_odf_buggy(output, cur_salt->original_length, crypt);\n\t\t\t\tcrypt_out[index+i][1] = crypt[0];\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tSHA256_CTX ctx;\n\t\t\tAES_KEY akey;\n\t\t\tunsigned 
char iv[16];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\t\tSHA256_Final((unsigned char *)hash[i], &ctx);\n\t\t\t}\n#ifdef SIMD_COEF_32\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = 32;\n\t\t\t\tpin[i] = hash[i];\n\t\t\t\tpout[i] = key[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,\n\t\t\t cur_salt->salt_length,\n\t\t\t cur_salt->iterations, pout,\n\t\t\t cur_salt->key_size, 0);\n#else\n\t\t\tpbkdf2_sha1(hash[0], 32, cur_salt->salt,\n\t\t\t cur_salt->salt_length,\n\t\t\t cur_salt->iterations, key[0],\n\t\t\t cur_salt->key_size, 0);\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tunsigned int crypt[8];\n\t\t\t\tmemcpy(iv, cur_salt->iv, 16);\n\t\t\t\tAES_set_decrypt_key(key[i], 256, &akey);\n\t\t\t\tAES_cbc_encrypt(cur_salt->content, output, cur_salt->content_length, &akey, iv, AES_DECRYPT);\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, output, cur_salt->content_length);\n\t\t\t\tSHA256_Final((unsigned char*)crypt, &ctx);\n\t\t\t\tcrypt_out[index+i][0] = crypt[0];\n\t\t\t\tcrypt_out[index+i][1] = crypt[0];\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/restic_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, failed, params, max_threads, local, saved_key, cur_salt, crypt_out)", "context_chars": 100, "text": "\tyescrypt_params_t params = {.N = cur_salt->N, .r = cur_salt->r, .p = cur_salt->p};\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunion {\n\t\t\tuint8_t u8[80];\n\t\t\tuint64_t u64[10];\n\t\t} kdf_out;\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\t\tif (yescrypt_kdf(NULL, &local[t], (const uint8_t *)saved_key[index],\n\t\t strlen(saved_key[index]),\n\t\t (const uint8_t *)cur_salt->salt, sizeof(cur_salt->salt), &params, kdf_out.u8, 64)) {\n\t\t\tfailed = errno ? 
errno : EINVAL;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\n\t\tstatic const union {\n\t\t\tuint8_t u8[16];\n\t\t\tuint64_t u64[2];\n\t\t} key_mask = {\n\t\t\t.u8 = {\n\t\t\t\t0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f,\n\t\t\t\t0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f\n\t\t\t}\n\t\t};\n\t\tkdf_out.u64[6] &= key_mask.u64[0];\n\t\tkdf_out.u64[7] &= key_mask.u64[1];\n\t\tconst uint8_t *poly1305_key = &kdf_out.u8[32];\n\n\t\tconst unsigned char *nonce = cur_salt->data;\n\t\tconst unsigned char *ciphertext = cur_salt->data + NONCE_SIZE;\n\n\t\tAES_KEY aeskey;\n\t\tAES_set_encrypt_key(poly1305_key, 128, &aeskey);\n\t\tunsigned char *prepared_key = &kdf_out.u8[48];\n\t\tAES_ecb_encrypt(nonce, prepared_key + 16, &aeskey, AES_ENCRYPT);\n\t\tpoly1305_auth((unsigned char *)crypt_out[index], ciphertext, 128, prepared_key);\n\t} #pragma omp parallel for default(none) private(index) shared(count, failed, params, max_threads, local, saved_key, cur_salt, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DOMINOSEC8_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += 3) {\n\t\tint i, j;\n\n\t\t// domino 5 hash - SEC_pwddigest_V1 - -m 8600\n\t\tif (keys_changed) {\n\t\t\tchar *k0 = saved_key[index];\n\t\t\tchar *k1 = saved_key[index + 1];\n\t\t\tchar *k2 = saved_key[index + 2];\n\t\t\tunsigned char digest16[3][16];\n\t\t\tdomino_big_md_3((unsigned char *)k0, strlen(k0),\n\t\t\t\t\t(unsigned char *)k1, strlen(k1),\n\t\t\t\t\t(unsigned char *)k2, strlen(k2),\n\t\t\t\t\tdigest16[0], digest16[1], digest16[2]);\n\n\t\t\t// Not (++i < 16) !\n\t\t\t// Domino will do hash of first 34 bytes ignoring The Fact that now\n\t\t\t// there is a salt at a beginning of buffer. 
This means that last 5\n\t\t\t// bytes \"EEFF)\" of password digest are meaningless.\n\t\t\tfor (i = 0, j = 6; i < 14; i++, j += 2) {\n\t\t\t\tconst char *hex2 = hex_table[ARCH_INDEX(digest16[0][i])];\n\t\t\t\tdigest34[index][j] = hex2[0];\n\t\t\t\tdigest34[index][j + 1] = hex2[1];\n\t\t\t\thex2 = hex_table[ARCH_INDEX(digest16[1][i])];\n\t\t\t\tdigest34[index + 1][j] = hex2[0];\n\t\t\t\tdigest34[index + 1][j + 1] = hex2[1];\n\t\t\t\thex2 = hex_table[ARCH_INDEX(digest16[2][i])];\n\t\t\t\tdigest34[index + 2][j] = hex2[0];\n\t\t\t\tdigest34[index + 2][j + 1] = hex2[1];\n\t\t\t}\n\t\t}\n\n\t\t// domino 6 hash - SEC_pwddigest_V2 - -m 8700\n\t\tif (salt_changed) {\n\t\t\tdigest34[index + 2][0] = digest34[index + 1][0] = digest34[index][0] = cur_salt->salt[0];\n\t\t\tdigest34[index + 2][1] = digest34[index + 1][1] = digest34[index][1] = cur_salt->salt[1];\n\t\t\tdigest34[index + 2][2] = digest34[index + 1][2] = digest34[index][2] = cur_salt->salt[2];\n\t\t\tdigest34[index + 2][3] = digest34[index + 1][3] = digest34[index][3] = cur_salt->salt[3];\n\t\t\tdigest34[index + 2][4] = digest34[index + 1][4] = digest34[index][4] = cur_salt->salt[4];\n\t\t\tdigest34[index + 2][5] = digest34[index + 1][5] = digest34[index][5] = '(';\n\t\t}\n\n\t\tdomino_big_md_3_34(digest34[index], digest34[index + 1],\n\t\t\t\tdigest34[index + 2],\n\t\t\t\t(unsigned char *)crypt_out[index],\n\t\t\t\t(unsigned char *)crypt_out[index + 1],\n\t\t\t\t(unsigned char *)crypt_out[index + 2]);\n\n\t\tfor (i= 0; i < 3; i++) {\n\t\t\t// domino 8(.5.x) hash - SEC_pwddigest_V3 - -m 9100\n\t\t\tunsigned char buffer[22 + 1] = {0};\n\t\t\tunsigned char tmp_hash[22 + 1 + 3 /* \"(G)\" */] = {0};\n\t\t\tmemcpy(tmp_hash, cur_salt->salt, 5);\n\t\t\tmemcpy(tmp_hash + 5, crypt_out[index + i], 16);\n\t\t\tdomino_encode(tmp_hash, buffer);\n\t\t\tsprintf((char*)tmp_hash, \"(G%s)\", buffer);\n\t\t\tpbkdf2_sha1(tmp_hash, 22, cur_salt->salt, 16, cur_salt->iterations, (unsigned char *)crypt_out_real[index+i], 8, 0);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/citrix_ns_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt;\n\tint index;\n\tint loops = (count + MIN_KEYS_PER_CRYPT - 1) / MIN_KEYS_PER_CRYPT;\n\n#ifdef _OPENMP\nfor (index = 0; index < loops; ++index) {\n#ifdef SIMD_COEF_32\n\t\tSIMDSHA1body(saved_key[index], (unsigned int*)crypt_key[index], NULL, SSEi_MIXED_IN);\n#else\n\t\tSHA_CTX ctx;\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE);\n\t\tSHA1_Update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]) + 1);\n\t\tSHA1_Final((unsigned char*)crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/BF_std.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t, int n)\n{\n#if BF_mt > 1\n\tint t;\n#endif\n\n#if BF_mt > 1 && defined(_OPENMP)\n#if defined(WITH_UBSAN)\n#else\n#pragma omp parallel for default(none) private(t) shared(n, BF_init_state, BF_init_key, BF_exp_key, salt, BF_magic_w, BF_out)\n\n\n\tfor_each_t() {\n#if BF_mt > 1\n#if BF_X2 == 3\n\t\tstruct BF_ctx BF_current[3];\n#elif BF_X2\n\t\tstruct BF_ctx BF_current[2];\n#else\n\t\tstruct BF_ctx BF_current;\n\n\n\n\t\tBF_word L0, R0;\n\t\tBF_word u1, u2, u3, u4;\n#if BF_X2\n\t\tBF_word L1, R1;\n\t\tBF_word v1, v2, v3, v4;\n#if BF_X2 == 3\n\t\tBF_word L2, R2;\n\t\tBF_word w1, w2, w3, 
w4;\n\n\n\t\tBF_word *ptr;\n\t\tBF_word count;\n#if BF_N > 1\n\t\tint index;\n\n#if BF_X2 == 3 && BF_mt > 1\n\t\tint lindex;\n\n\n\t\tfor_each_ti() {\n\t\t\tint i;\n\n\t\t\tmemcpy(BF_current INDEX2.S,\n\t\t\t BF_init_state.S, sizeof(BF_current INDEX2.S));\n\t\t\tmemcpy(BF_current INDEX2.P,\n\t\t\t BF_init_key INDEX, sizeof(BF_current INDEX2.P));\n\n\t\t\tL0 = R0 = 0;\n\t\t\tfor (i = 0; i < BF_ROUNDS + 2; i += 2) {\n\t\t\t\tL0 ^= salt->salt[i & 2];\n\t\t\t\tR0 ^= salt->salt[(i & 2) + 1];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\tBF_current INDEX2.P[i] = L0;\n\t\t\t\tBF_current INDEX2.P[i + 1] = R0;\n\t\t\t}\n\n\t\t\tptr = BF_current INDEX2.S[0];\n\t\t\tdo {\n\t\t\t\tptr += 4;\n\t\t\t\tL0 ^= salt->salt[(BF_ROUNDS + 2) & 3];\n\t\t\t\tR0 ^= salt->salt[(BF_ROUNDS + 3) & 3];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\t*(ptr - 4) = L0;\n\t\t\t\t*(ptr - 3) = R0;\n\n\t\t\t\tL0 ^= salt->salt[(BF_ROUNDS + 4) & 3];\n\t\t\t\tR0 ^= salt->salt[(BF_ROUNDS + 5) & 3];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\t*(ptr - 2) = L0;\n\t\t\t\t*(ptr - 1) = R0;\n\t\t\t} while (ptr < &BF_current INDEX2.S[3][0xFF]);\n\t\t}\n\n\t\tcount = 1 << salt->rounds;\n\t\tdo {\n\t\t\tfor_each_ti() {\n\t\t\t\tBF_current INDEX2.P[0] ^= BF_exp_key INDEX[0];\n\t\t\t\tBF_current INDEX2.P[1] ^= BF_exp_key INDEX[1];\n\t\t\t\tBF_current INDEX2.P[2] ^= BF_exp_key INDEX[2];\n\t\t\t\tBF_current INDEX2.P[3] ^= BF_exp_key INDEX[3];\n\t\t\t\tBF_current INDEX2.P[4] ^= BF_exp_key INDEX[4];\n\t\t\t\tBF_current INDEX2.P[5] ^= BF_exp_key INDEX[5];\n\t\t\t\tBF_current INDEX2.P[6] ^= BF_exp_key INDEX[6];\n\t\t\t\tBF_current INDEX2.P[7] ^= BF_exp_key INDEX[7];\n\t\t\t\tBF_current INDEX2.P[8] ^= BF_exp_key INDEX[8];\n\t\t\t\tBF_current INDEX2.P[9] ^= BF_exp_key INDEX[9];\n\t\t\t\tBF_current INDEX2.P[10] ^= BF_exp_key INDEX[10];\n\t\t\t\tBF_current INDEX2.P[11] ^= BF_exp_key INDEX[11];\n\t\t\t\tBF_current INDEX2.P[12] ^= BF_exp_key INDEX[12];\n\t\t\t\tBF_current INDEX2.P[13] ^= BF_exp_key INDEX[13];\n\t\t\t\tBF_current INDEX2.P[14] ^= BF_exp_key INDEX[14];\n\t\t\t\tBF_current INDEX2.P[15] ^= BF_exp_key INDEX[15];\n\t\t\t\tBF_current INDEX2.P[16] ^= BF_exp_key INDEX[16];\n\t\t\t\tBF_current INDEX2.P[17] ^= BF_exp_key INDEX[17];\n\t\t\t}\n\n\t\t\tBF_body();\n\n\t\t\tu1 = salt->salt[0];\n\t\t\tu2 = salt->salt[1];\n\t\t\tu3 = salt->salt[2];\n\t\t\tu4 = salt->salt[3];\n\t\t\tfor_each_ti() {\n\t\t\t\tBF_current INDEX2.P[0] ^= u1;\n\t\t\t\tBF_current INDEX2.P[1] ^= u2;\n\t\t\t\tBF_current INDEX2.P[2] ^= u3;\n\t\t\t\tBF_current INDEX2.P[3] ^= u4;\n\t\t\t\tBF_current INDEX2.P[4] ^= u1;\n\t\t\t\tBF_current INDEX2.P[5] ^= u2;\n\t\t\t\tBF_current INDEX2.P[6] ^= u3;\n\t\t\t\tBF_current INDEX2.P[7] ^= u4;\n\t\t\t\tBF_current INDEX2.P[8] ^= u1;\n\t\t\t\tBF_current INDEX2.P[9] ^= u2;\n\t\t\t\tBF_current INDEX2.P[10] ^= u3;\n\t\t\t\tBF_current INDEX2.P[11] ^= u4;\n\t\t\t\tBF_current INDEX2.P[12] ^= u1;\n\t\t\t\tBF_current INDEX2.P[13] ^= u2;\n\t\t\t\tBF_current INDEX2.P[14] ^= u3;\n\t\t\t\tBF_current INDEX2.P[15] ^= u4;\n\t\t\t\tBF_current INDEX2.P[16] ^= u1;\n\t\t\t\tBF_current INDEX2.P[17] ^= u2;\n\t\t\t}\n\n\t\t\tBF_body();\n\t\t} while (--count);\n\n#if BF_mt == 1\n\t\tfor_each_ti() {\n\t\t\tL0 = BF_magic_w[0];\n\t\t\tR0 = BF_magic_w[1];\n\n\t\t\tcount = 64;\n\t\t\tdo {\n\t\t\t\tBF_ENCRYPT(BF_current INDEX, L0, R0);\n\t\t\t} while (--count);\n\n\t\t\tBF_out INDEX0[0] = L0;\n\t\t\tBF_out INDEX0[1] = R0;\n\t\t}\n#else\n\t\tfor_each_ti() {\n\t\t\tBF_word L, R;\n\t\t\tBF_word u1, u2, u3, u4;\n\t\t\tBF_word count;\n\t\t\tint 
i;\n\n\t\t\tmemcpy(&BF_out[index], &BF_magic_w,\n\t\t\t sizeof(BF_out[index]));\n\n\t\t\tcount = 64;\n\t\t\tdo\n\t\t\tfor (i = 0; i < 6; i += 2) {\n\t\t\t\tL = BF_out[index][i];\n\t\t\t\tR = BF_out[index][i + 1];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L, R);\n\t\t\t\tBF_out[index][i] = L;\n\t\t\t\tBF_out[index][i + 1] = R;\n\t\t\t} while (--count);\n\n/* This has to be bug-compatible with the original implementation :-) */\n\t\t\tBF_out[index][5] &= ~(BF_word)0xFF;\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/BF_std.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, BF_init_state, BF_init_key, BF_exp_key, salt, BF_magic_w, BF_out)", "context_chars": 100, "text": "t;\n#endif\n\n#if BF_mt > 1 && defined(_OPENMP)\n#if defined(WITH_UBSAN)\n#pragma omp parallel for\n#else\nfor_each_t() {\n#if BF_mt > 1\n#if BF_X2 == 3\n\t\tstruct BF_ctx BF_current[3];\n#elif BF_X2\n\t\tstruct BF_ctx BF_current[2];\n#else\n\t\tstruct BF_ctx BF_current;\n\n\n\n\t\tBF_word L0, R0;\n\t\tBF_word u1, u2, u3, u4;\n#if BF_X2\n\t\tBF_word L1, R1;\n\t\tBF_word v1, v2, v3, v4;\n#if BF_X2 == 3\n\t\tBF_word L2, R2;\n\t\tBF_word w1, w2, w3, w4;\n\n\n\t\tBF_word *ptr;\n\t\tBF_word count;\n#if BF_N > 1\n\t\tint index;\n\n#if BF_X2 == 3 && BF_mt > 1\n\t\tint lindex;\n\n\n\t\tfor_each_ti() {\n\t\t\tint i;\n\n\t\t\tmemcpy(BF_current INDEX2.S,\n\t\t\t BF_init_state.S, sizeof(BF_current INDEX2.S));\n\t\t\tmemcpy(BF_current INDEX2.P,\n\t\t\t BF_init_key INDEX, sizeof(BF_current INDEX2.P));\n\n\t\t\tL0 = R0 = 0;\n\t\t\tfor (i = 0; i < BF_ROUNDS + 2; i += 2) {\n\t\t\t\tL0 ^= salt->salt[i & 2];\n\t\t\t\tR0 ^= salt->salt[(i & 2) + 1];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\tBF_current INDEX2.P[i] = L0;\n\t\t\t\tBF_current INDEX2.P[i + 1] = R0;\n\t\t\t}\n\n\t\t\tptr = BF_current INDEX2.S[0];\n\t\t\tdo {\n\t\t\t\tptr += 4;\n\t\t\t\tL0 ^= salt->salt[(BF_ROUNDS + 2) & 3];\n\t\t\t\tR0 ^= salt->salt[(BF_ROUNDS + 3) & 3];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\t*(ptr - 4) = L0;\n\t\t\t\t*(ptr - 3) = R0;\n\n\t\t\t\tL0 ^= salt->salt[(BF_ROUNDS + 4) & 3];\n\t\t\t\tR0 ^= salt->salt[(BF_ROUNDS + 5) & 3];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L0, R0);\n\t\t\t\t*(ptr - 2) = L0;\n\t\t\t\t*(ptr - 1) = R0;\n\t\t\t} while (ptr < &BF_current INDEX2.S[3][0xFF]);\n\t\t}\n\n\t\tcount = 1 << salt->rounds;\n\t\tdo {\n\t\t\tfor_each_ti() {\n\t\t\t\tBF_current INDEX2.P[0] ^= BF_exp_key INDEX[0];\n\t\t\t\tBF_current INDEX2.P[1] ^= BF_exp_key INDEX[1];\n\t\t\t\tBF_current INDEX2.P[2] ^= BF_exp_key INDEX[2];\n\t\t\t\tBF_current INDEX2.P[3] ^= BF_exp_key INDEX[3];\n\t\t\t\tBF_current INDEX2.P[4] ^= BF_exp_key INDEX[4];\n\t\t\t\tBF_current INDEX2.P[5] ^= BF_exp_key INDEX[5];\n\t\t\t\tBF_current INDEX2.P[6] ^= BF_exp_key INDEX[6];\n\t\t\t\tBF_current INDEX2.P[7] ^= BF_exp_key INDEX[7];\n\t\t\t\tBF_current INDEX2.P[8] ^= BF_exp_key INDEX[8];\n\t\t\t\tBF_current INDEX2.P[9] ^= BF_exp_key INDEX[9];\n\t\t\t\tBF_current INDEX2.P[10] ^= BF_exp_key INDEX[10];\n\t\t\t\tBF_current INDEX2.P[11] ^= BF_exp_key INDEX[11];\n\t\t\t\tBF_current INDEX2.P[12] ^= BF_exp_key INDEX[12];\n\t\t\t\tBF_current INDEX2.P[13] ^= BF_exp_key INDEX[13];\n\t\t\t\tBF_current INDEX2.P[14] ^= BF_exp_key INDEX[14];\n\t\t\t\tBF_current INDEX2.P[15] ^= BF_exp_key INDEX[15];\n\t\t\t\tBF_current INDEX2.P[16] ^= BF_exp_key INDEX[16];\n\t\t\t\tBF_current INDEX2.P[17] ^= BF_exp_key INDEX[17];\n\t\t\t}\n\n\t\t\tBF_body();\n\n\t\t\tu1 = 
salt->salt[0];\n\t\t\tu2 = salt->salt[1];\n\t\t\tu3 = salt->salt[2];\n\t\t\tu4 = salt->salt[3];\n\t\t\tfor_each_ti() {\n\t\t\t\tBF_current INDEX2.P[0] ^= u1;\n\t\t\t\tBF_current INDEX2.P[1] ^= u2;\n\t\t\t\tBF_current INDEX2.P[2] ^= u3;\n\t\t\t\tBF_current INDEX2.P[3] ^= u4;\n\t\t\t\tBF_current INDEX2.P[4] ^= u1;\n\t\t\t\tBF_current INDEX2.P[5] ^= u2;\n\t\t\t\tBF_current INDEX2.P[6] ^= u3;\n\t\t\t\tBF_current INDEX2.P[7] ^= u4;\n\t\t\t\tBF_current INDEX2.P[8] ^= u1;\n\t\t\t\tBF_current INDEX2.P[9] ^= u2;\n\t\t\t\tBF_current INDEX2.P[10] ^= u3;\n\t\t\t\tBF_current INDEX2.P[11] ^= u4;\n\t\t\t\tBF_current INDEX2.P[12] ^= u1;\n\t\t\t\tBF_current INDEX2.P[13] ^= u2;\n\t\t\t\tBF_current INDEX2.P[14] ^= u3;\n\t\t\t\tBF_current INDEX2.P[15] ^= u4;\n\t\t\t\tBF_current INDEX2.P[16] ^= u1;\n\t\t\t\tBF_current INDEX2.P[17] ^= u2;\n\t\t\t}\n\n\t\t\tBF_body();\n\t\t} while (--count);\n\n#if BF_mt == 1\n\t\tfor_each_ti() {\n\t\t\tL0 = BF_magic_w[0];\n\t\t\tR0 = BF_magic_w[1];\n\n\t\t\tcount = 64;\n\t\t\tdo {\n\t\t\t\tBF_ENCRYPT(BF_current INDEX, L0, R0);\n\t\t\t} while (--count);\n\n\t\t\tBF_out INDEX0[0] = L0;\n\t\t\tBF_out INDEX0[1] = R0;\n\t\t}\n#else\n\t\tfor_each_ti() {\n\t\t\tBF_word L, R;\n\t\t\tBF_word u1, u2, u3, u4;\n\t\t\tBF_word count;\n\t\t\tint i;\n\n\t\t\tmemcpy(&BF_out[index], &BF_magic_w,\n\t\t\t sizeof(BF_out[index]));\n\n\t\t\tcount = 64;\n\t\t\tdo\n\t\t\tfor (i = 0; i < 6; i += 2) {\n\t\t\t\tL = BF_out[index][i];\n\t\t\t\tR = BF_out[index][i + 1];\n\t\t\t\tBF_ENCRYPT(BF_current INDEX2, L, R);\n\t\t\t\tBF_out[index][i] = L;\n\t\t\t\tBF_out[index][i + 1] = R;\n\t\t\t} while (--count);\n\n/* This has to be bug-compatible with the original implementation :-) */\n\t\t\tBF_out[index][5] &= ~(BF_word)0xFF;\n\t\t}\n\n\t} #pragma omp parallel for default(none) private(t) shared(n, BF_init_state, BF_init_key, BF_exp_key, salt, BF_magic_w, BF_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/keyring_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index+=MIN_KEYS_PER_CRYPT)\n\t{\n\t\tint i;\n\t\tunsigned char (*buffers)[sizeof(cur_salt->ct)];\n\n\t\t// This is too big to be on stack. See #1292.\n\t\tbuffers = mem_alloc(MIN_KEYS_PER_CRYPT * sizeof(*buffers));\n\n\t\tdecrypt_buffer(buffers, index);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (verify_decrypted_buffer(buffers[i], cur_salt->crypto_size)) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t\tMEM_FREE(buffers);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/office_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " inc =\n\t\t(cur_salt->version == 2013) ? 
SHA512_LOOP_CNT : SHA1_LOOP_CNT;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n\t\tif (cur_salt->version == 2007)\n\t\t\tGeneratePasswordHashUsingSHA1(index, &encryptionKey[index]);\n\t\telse if (cur_salt->version == 2010)\n\t\t\tGenerateAgileEncryptionKey(index, &verifierKeys1[index]);\n\t\telse //if (cur_salt->version == 2013)\n\t\t\tGenerateAgileEncryptionKey512(index, &verifierKeys512[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/office_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int count)\n{\n\tms_office_binary_blob *blob = ((fmt_data*)binary)->blob;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (cur_salt->version == 2007)\n\t\t\tcracked[index] = PasswordVerifier(blob, encryptionKey[index]);\n\t\telse if (cur_salt->version == 2010) {\n\t\t\tuint8_t decryptedVerifierHashInputBytes[16];\n\t\t\tuint8_t decryptedVerifierHashBytes[32];\n\t\t\tuint8_t hash[20];\n\t\t\tSHA_CTX ctx;\n\n\t\t\tDecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys1[index],\n\t\t\t blob->encryptedVerifier,\n\t\t\t decryptedVerifierHashInputBytes,\n\t\t\t 16);\n\t\t\tDecryptUsingSymmetricKeyAlgorithm(cur_salt,\n\t\t\t &verifierKeys1[index][32],\n\t\t\t blob->encryptedVerifierHash,\n\t\t\t decryptedVerifierHashBytes, 32);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);\n\t\t\tSHA1_Final(hash, &ctx);\n\n\t\t\tcracked[index] = !memcmp(hash, decryptedVerifierHashBytes, 20);\n\t\t}\n\t\telse /* if (cur_salt->version == 2013) */ {\n\t\t\tuint8_t decryptedVerifierHashInputBytes[16];\n\t\t\tuint8_t decryptedVerifierHashBytes[32];\n\t\t\tuint8_t hash[64];\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tDecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys512[index],\n\t\t\t blob->encryptedVerifier,\n\t\t\t decryptedVerifierHashInputBytes,\n\t\t\t 16);\n\t\t\tDecryptUsingSymmetricKeyAlgorithm(cur_salt,\n\t\t\t &verifierKeys512[index][64],\n\t\t\t blob->encryptedVerifierHash,\n\t\t\t decryptedVerifierHashBytes, 32);\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, decryptedVerifierHashInputBytes, 16);\n\t\t\tSHA512_Final(hash, &ctx);\n\n\t\t\tcracked[index] = !memcmp(hash, decryptedVerifierHashBytes, 20);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j) shared(count, salt, in, out)", "context_chars": 100, "text": "(int count,\n wpapsk_password *in, wpapsk_hash *out, wpapsk_salt *salt)\n{\n\tint j;\n\n#ifdef _OPENMP\nfor (j = 0; j < count; j++) {\n\t\tpbkdf2_sha1((const unsigned char*)(in[j].v),\n\t\t in[j].length,\n\t\t salt->essid, salt->length,\n\t\t 4096, (unsigned char*)&out[j],\n\t\t 32, 0);\n\t} #pragma omp parallel for default(none) private(j) shared(count, salt, in, out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(count, salt, in, out, loops)", "context_chars": 100, "text": "papsk_salt *salt)\n{\n\tint t; // thread count\n\tint loops = (count+NBKEYS-1) / NBKEYS;\n\n#ifdef _OPENMP\nfor (t = 0; t < loops; t++) {\n\t\tint lens[NBKEYS], i;\n\t\tunsigned char *pin[NBKEYS];\n\t\tunion {\n\t\t\tuint32_t *pout[NBKEYS];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < 
NBKEYS; ++i) {\n\t\t\tlens[i] = in[t*NBKEYS+i].length;\n\t\t\tpin[i] = (unsigned char*)in[t*NBKEYS+i].v;\n\t\t\tx.pout[i] = &out[t*NBKEYS+i].v[0];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t salt->essid, salt->length,\n\t\t 4096, &(x.poutc),\n\t\t 32, 0);\n\t} #pragma omp parallel for default(none) private(t) shared(count, salt, in, out, loops)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/oracle12c_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tint index;\n\tconst int count = *pcount;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tSHA512_CTX ctx;\n\t\tint i = 0;\n#if SIMD_COEF_64\n\t\tint lens[SSE_GROUP_SZ_SHA512];\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA512];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA512];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = (uint32_t*)(crypt_out[index+i]);\n\t\t}\n\t\tpbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt,\n\t\t cur_salt->saltlen, 4096, &(x.poutc), BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_sha512((const unsigned char*)saved_key[index],\n\t\t strlen(saved_key[index]), cur_salt->salt,\n\t\t cur_salt->saltlen, 4096,\n\t\t (unsigned char*)crypt_out[index], BINARY_SIZE, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[index + i], BINARY_SIZE);\n\t\t\tSHA512_Update(&ctx, cur_salt->salt, 16); // AUTH_VFR_DATA first 16 bytes\n\t\t\tSHA512_Final((unsigned char*)crypt_out[index + i], &ctx);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sha1crypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ll(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA1\n\t\tint lens[SSE_GROUP_SZ_SHA1], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA1];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA1];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf1_sha1_sse((const unsigned char **)pin, lens,\n\t\t cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, &(x.poutc),\n\t\t BINARY_SIZE, 0);\n#else\n\t\tpbkdf1_sha1((const unsigned char*)(saved_key[index]),\n\t\t strlen(saved_key[index]),\n\t\t cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, (unsigned char*)crypt_out[index],\n\t\t BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawBLAKE2_512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t(void)blake2b((uint8_t *)crypt_out[index], saved_key[index], NULL, 64, saved_len[index], 0);\n\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/as400_des_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tDES_cblock des_key;\n\t\tDES_key_schedule schedule;\n\t\tint i;\n\t\tint saved_key_length = strlen(saved_key[index]);\n\n\t\tif (saved_key_length <= 8) {\n\t\t\t/* process key */\n\t\t\tfor (i = 0; saved_key[index][i]; i++)\n\t\t\t\tdes_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];\n\n\t\t\t/* replace missing characters in password by (EBCDIC space (0x40) XOR 0x55) << 1 */\n\t\t\twhile(i < 8)\n\t\t\t\tdes_key[i++] = 0x2a;\n\n\t\t\tDES_set_key_unchecked(&des_key, &schedule);\n\t\t\t/* do encryption */\n\t\t\tDES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, (DES_cblock*)crypt_out[index], &schedule, DES_ENCRYPT);\n\t\t}\n\t\telse {\n\t\t\tDES_cblock des_key1, des_key2;\n\t\t\tDES_key_schedule schedule1, schedule2;\n\t\t\tDES_cblock hash_1, hash_2;\n\t\t\tunsigned char output[8];\n\n\t\t\t/* process key */\n\t\t\tfor (i = 0; i < 8; i++)\n\t\t\t\tdes_key1[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];\n\n\t\t\tfor (i = 0; i < saved_key_length-8; i++)\n\t\t\t\tdes_key2[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][8+i])];\n\n\t\t\t/* replace missing characters in password by (EBCDIC space (0x40) XOR 0x55) << 1 */\n\t\t\twhile(i < 8)\n\t\t\t\tdes_key2[i++] = 0x2a;\n\n\t\t\tDES_set_key_unchecked(&des_key1, &schedule1);\n\t\t\tDES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, &hash_1, &schedule1, DES_ENCRYPT);\n\t\t\tDES_set_key_unchecked(&des_key2, &schedule2);\n\t\t\tDES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, &hash_2, &schedule2, DES_ENCRYPT);\n\t\t\tfor (i = 0; i < 8; i++) {\n\t\t\t\toutput[i] = hash_1[i] ^ hash_2[i];\n\t\t\t}\n\n\t\t\tmemcpy((unsigned char*)crypt_out[index], output, 8);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/money_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[24];\n\t\tunsigned char out[32];\n\n\t\tif (cur_salt->type == 0) {\n\t\t\tMD5_CTX mctx;\n\n\t\t\tMD5_Init(&mctx);\n\t\t\tMD5_Update(&mctx, saved_key[index], PASSWORD_LENGTH);\n\t\t\tMD5_Final(key, &mctx);\n\t\t} else if (cur_salt->type == 1) {\n\t\t\tSHA_CTX sctx;\n\n\t\t\tSHA1_Init(&sctx);\n\t\t\tSHA1_Update(&sctx, saved_key[index], PASSWORD_LENGTH);\n\t\t\tSHA1_Final(key, &sctx);\n\t\t}\n\n\t\t// combine key[:16] + salt into a key\n\t\tmemcpy(key + PASSWORD_DIGEST_LENGTH, cur_salt->salt, 8);\n\t\tRC4_single(key, 24, cur_salt->encrypted_bytes, 4, out);\n\t\tif (memcmp(out, cur_salt->salt, 4) == 0)\n\t\t\tcracked[index] = 1;\n\t\telse\n\t\t\tcracked[index] = 0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ssha512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index+=MIN_KEYS_PER_CRYPT) {\n#ifndef SIMD_COEF_64\n\t\tSHA512_CTX ctx;\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, saved_key[index], 
saved_len[index]);\n\t\tSHA512_Update(&ctx, saved_salt->data.c, saved_salt->len);\n\t\tSHA512_Final((unsigned char*)crypt_out[index], &ctx);\n#else\n\t\t// We have to append salt (and re-clean buffer if it is dirty),\n\t\t// then append final length of password.salt\n\t\tint i, j;\n\t\tunsigned char *sk = (unsigned char*)saved_key;\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tint idx = i+index;\n\t\t\tint x = saved_len[idx];\n\t\t\tfor (j = 0; j < saved_salt->len; ++j)\n\t\t\t\tsk[GETPOS(x+j,idx)] = saved_salt->data.c[j];\n\t\t\tx += j;\n\t\t\tsk[GETPOS(x,idx)] = 0x80;\n\t\t\t++x;\n\t\t\twhile (sk[GETPOS(x,idx)]) {\n\t\t\t\tsk[GETPOS(x,idx)] = 0;\n\t\t\t\t++x;\n\t\t\t}\n\t\t\t*(len_ptr64[idx]) = (saved_len[idx]+saved_salt->len)<<3;\n\t\t}\n\t\tSIMDSHA512body(&saved_key[index/SIMD_COEF_64], crypt_out[index/SIMD_COEF_64], NULL, SSEi_MIXED_IN);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/hmacSHA512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "EX_FLAGS\n#else\n\tconst int B_LEN\n#endif\n\t)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tunsigned int i;\n\n\t\tif (new_keys) {\n\t\t\tSIMDSHA512body(&ipad[index * PAD_SIZE],\n\t\t\t (uint64_t*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN|EX_FLAGS);\n\t\t\tSIMDSHA512body(&opad[index * PAD_SIZE],\n\t\t\t (uint64_t*)&prep_opad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN|EX_FLAGS);\n\t\t}\n\n\t\tSIMDSHA512body(cur_salt->salt[0],\n\t\t\t (uint64_t*)&crypt_key[index * PAD_SIZE],\n\t\t\t (uint64_t*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n\t\tfor (i = 1; i <= (cur_salt->salt_len + 16) / PAD_SIZE; i++)\n\t\t\tSIMDSHA512body(cur_salt->salt[i],\n\t\t\t (uint64_t*)&crypt_key[index * PAD_SIZE],\n\t\t\t (uint64_t*)&crypt_key[index * PAD_SIZE],\n\t\t\t SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n\n\t\tif (EX_FLAGS) {\n\t\t\t// NOTE, SSESHA384 will output 64 bytes. We need the first 48 (plus the 0x80 padding).\n\t\t\t// so we are forced to 'clean' this crap up, before using the crypt as the input.\n\t\t\tuint64_t *pclear = (uint64_t*)&crypt_key[index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64*8];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tpclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64] = 0x8000000000000000ULL;\n\t\t\t\tpclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64+SIMD_COEF_64] = 0;\n\t\t\t}\n\t\t}\n\n\t\tSIMDSHA512body(&crypt_key[index * PAD_SIZE],\n\t\t (uint64_t*)&crypt_key[index * PAD_SIZE],\n\t\t (uint64_t*)&prep_opad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n#else\n\t\tSHA512_CTX ctx;\n\n\t\t// Note, for oSSL, we really only need SHA512_Init and SHA384_Init. From that point\n\t\t// on, SHA512_Update/SHA512_Final can be used. Also, jtr internal sha2.c file works\n\t\t// like that. 
BUT I am not sure every hash engine works that way, so we are keeping\n\t\t// the 'full' block.\n\t\tif (B_LEN == BINARY_SIZE) {\n\t\t\tif (new_keys) {\n\t\t\t\tSHA512_Init(&ipad_ctx[index]);\n\t\t\t\tSHA512_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\t\tSHA512_Init(&opad_ctx[index]);\n\t\t\t\tSHA512_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t\t}\n\n\t\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\t\tSHA512_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );\n\t\t\tSHA512_Final( (unsigned char*) crypt_key[index], &ctx);\n\n\t\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\t\tSHA512_Update( &ctx, crypt_key[index], B_LEN);\n\t\t\tSHA512_Final( (unsigned char*) crypt_key[index], &ctx);\n\t\t} else {\n\t\t\tif (new_keys) {\n\t\t\t\tSHA384_Init(&ipad_ctx[index]);\n\t\t\t\tSHA384_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\t\tSHA384_Init(&opad_ctx[index]);\n\t\t\t\tSHA384_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t\t}\n\n\t\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\t\tSHA384_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );\n\t\t\tSHA384_Final( (unsigned char*) crypt_key[index], &ctx);\n\n\t\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\t\tSHA384_Update( &ctx, crypt_key[index], B_LEN);\n\t\t\tSHA384_Final( (unsigned char*) crypt_key[index], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/BFEgg_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (saved_key[index][0] != 0)\n\t\t\tblowfish_encrypt_pass(saved_key[index],\n\t\t\t\t(char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bestcrypt_ve_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\t}\n\n\tstatic const yescrypt_params_t params = { .N = 0x8000, .r = 16, .p = 1 };\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char kdf_out[32];\n\t\t/* BestCrypt uses CBC mode with a null IV */\n\t\tunsigned char iv[16] = {0};\n\t\tunsigned char out[sizeof(cur_salt->encrypted_data)];\n\n\t\tSHA256_CTX ctx;\n\t\tunsigned char sha256_hash[32];\n\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t (const uint8_t *)saved_key[index],\n\t\t strlen(saved_key[index]),\n\t\t (const uint8_t *)cur_salt->salt,\n\t\t sizeof(cur_salt->salt),\n\t\t &params,\n\t\t kdf_out, sizeof(kdf_out))){\n\t\t\tfailed = errno ? 
errno : EINVAL;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\t\t/*\n\t\t we will now use output of scrypt as key for desired encryption\n\t\t algorithm in CBC mode\n\t\t*/\n\t\tif (cur_salt->enc_algoID == aesId) {\n\t\t\tAES_KEY aes_key;\n\n\t\t\tAES_set_decrypt_key(kdf_out, 256, &aes_key);\n\t\t\tAES_cbc_encrypt(cur_salt->encrypted_data,\n\t\t\t out,\n\t\t\t sizeof(cur_salt->encrypted_data),\n\t\t\t &aes_key,\n\t\t\t iv,\n\t\t\t AES_DECRYPT);\n\t\t} else if(cur_salt->enc_algoID == twofishId) {\n\t\t\tTwofish_key tkey;\n\n\t\t\tTwofish_prepare_key(kdf_out, sizeof(kdf_out), &tkey);\n\t\t\tTwofish_Decrypt_no_padding(&tkey,\n\t\t\t cur_salt->encrypted_data,\n\t\t\t out,\n\t\t\t sizeof(cur_salt->encrypted_data),\n\t\t\t iv);\n\t\t} else if(cur_salt->enc_algoID == serpentId) {\n\t\t\tuint8_t ks[SERPENT_KS];\n\n\t\t\tserpent_set_key(kdf_out, ks);\n\t\t\tserpent_cbc_decrypt(cur_salt->encrypted_data,\n\t\t\t out,\n\t\t\t sizeof(cur_salt->encrypted_data),\n\t\t\t ks,\n\t\t\t iv);\n\t\t} else if(cur_salt->enc_algoID == camelliaId) {\n\t\t\tCAMELLIA_KEY ck;\n\n\t\t\tCamellia_set_key(kdf_out, 256, &ck);\n\t\t\tCamellia_cbc_encrypt(cur_salt->encrypted_data,\n\t\t\t out,\n\t\t\t sizeof(cur_salt->encrypted_data),\n\t\t\t &ck,\n\t\t\t iv,\n\t\t\t CAMELLIA_DECRYPT);\n\t\t} /* else if(cur_salt->enc_algoID == rc6Id) {\n\t\t\tTODO\n\t\t}\n\t\t*/\n\n\t\t/* we now compute sha256(decrypted_content[0:0x40]) and\n\t\tcompare it with decrypted_content[0x40:0x60] */\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, out, 0x40);\n\t\tSHA256_Final(sha256_hash, &ctx);\n\t\tcracked[index] = (0 == memcmp(sha256_hash, out + 0x40, 0x20));\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\tany_cracked |= 1;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2-hmac-md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#if SIMD_COEF_32\n\t\tint lens[SSE_GROUP_SZ_MD5], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_MD5];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_MD5];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_md5_sse((const unsigned char **)pin, lens,\n\t\t (unsigned char*)cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, &(x.poutc),\n\t\t PBKDF2_MDx_BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_md5((unsigned char*)(saved_key[index]),\n\t\t strlen(saved_key[index]),\n\t\t (unsigned char*)cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, (unsigned char*)crypt_out[index],\n\t\t PBKDF2_MDx_BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/SybaseASE_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\n#if defined(WITH_UBSAN)\n#else\n#ifndef SIMD_COEF_32\n#pragma omp parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)\n#else\n#pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)\n\n\n\n\tfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) 
{\n#ifndef SIMD_COEF_32\n\t\tSHA256_CTX ctx;\n\t\tif (dirty) {\n\t\t\tSHA256_Init(&prep_ctx[index]);\n\t\t\tSHA256_Update(&prep_ctx[index], prep_key[index], 510);\n\t\t}\n\t\tmemcpy(&ctx, &prep_ctx[index], sizeof(ctx));\n\t\tSHA256_Update(&ctx, prep_key[index] + 510/2, 8);\n\t\tSHA256_Final((unsigned char *)crypt_out[index], &ctx);\n#else\n\t\tunsigned char _OBuf[32*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *crypt;\n\t\tuint32_t *crypt32;\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_CACHE);\n\t\tcrypt32 = (uint32_t*)crypt;\n\t\tif (dirty) {\n\t\t\tSIMDSHA256body(prep_key[index/MAX_KEYS_PER_CRYPT], crypt_cache[index], NULL, SSEi_FLAT_IN|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][1]), crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t}\n\t\tmemcpy(crypt32, crypt_cache[index], 32*MAX_KEYS_PER_CRYPT);\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][2]), crypt32, crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][3]), crypt_out[index], crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_OUT);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/SybaseASE_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)", "context_chars": 100, "text": " index;\n\n#ifdef _OPENMP\n#if defined(WITH_UBSAN)\n#pragma omp parallel for\n#else\n#ifndef SIMD_COEF_32\n#else\n#pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)\n\n\n\n\tfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n#ifndef SIMD_COEF_32\n\t\tSHA256_CTX ctx;\n\t\tif (dirty) {\n\t\t\tSHA256_Init(&prep_ctx[index]);\n\t\t\tSHA256_Update(&prep_ctx[index], prep_key[index], 510);\n\t\t}\n\t\tmemcpy(&ctx, &prep_ctx[index], sizeof(ctx));\n\t\tSHA256_Update(&ctx, prep_key[index] + 510/2, 8);\n\t\tSHA256_Final((unsigned char *)crypt_out[index], &ctx);\n#else\n\t\tunsigned char _OBuf[32*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *crypt;\n\t\tuint32_t *crypt32;\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_CACHE);\n\t\tcrypt32 = (uint32_t*)crypt;\n\t\tif (dirty) {\n\t\t\tSIMDSHA256body(prep_key[index/MAX_KEYS_PER_CRYPT], crypt_cache[index], NULL, SSEi_FLAT_IN|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][1]), crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], 
SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t}\n\t\tmemcpy(crypt32, crypt_cache[index], 32*MAX_KEYS_PER_CRYPT);\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][2]), crypt32, crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][3]), crypt_out[index], crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_OUT);\n\n\t} #pragma omp parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/SybaseASE_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)", "context_chars": 100, "text": "parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)\n#else\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n#ifndef SIMD_COEF_32\n\t\tSHA256_CTX ctx;\n\t\tif (dirty) {\n\t\t\tSHA256_Init(&prep_ctx[index]);\n\t\t\tSHA256_Update(&prep_ctx[index], prep_key[index], 510);\n\t\t}\n\t\tmemcpy(&ctx, &prep_ctx[index], sizeof(ctx));\n\t\tSHA256_Update(&ctx, prep_key[index] + 510/2, 8);\n\t\tSHA256_Final((unsigned char *)crypt_out[index], &ctx);\n#else\n\t\tunsigned char _OBuf[32*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *crypt;\n\t\tuint32_t *crypt32;\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_CACHE);\n\t\tcrypt32 = (uint32_t*)crypt;\n\t\tif (dirty) {\n\t\t\tSIMDSHA256body(prep_key[index/MAX_KEYS_PER_CRYPT], crypt_cache[index], NULL, SSEi_FLAT_IN|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][1]), crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t\tSIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);\n\t\t}\n\t\tmemcpy(crypt32, crypt_cache[index], 32*MAX_KEYS_PER_CRYPT);\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][2]), crypt32, crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][3]), crypt_out[index], crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_OUT);\n\n\t} #pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bitlocker_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n\t\tunsigned char *passwordBuf;\n\t\tint passwordBufSize, i;\n\t\tunsigned char out[MAX_KEYS_PER_CRYPT][32];\n\t\tSHA256_CTX ctx;\n\t\tunsigned char output[256] = { 0 };\n\t\tuint32_t data_size = 0;\n\t\tuint32_t version = 0;\n\t\tunsigned char *vmk_blob = NULL; // contains volume master 
key\n\t\tunsigned char v1, v2;\n\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\t// do double-sha256 of password encoded in \"utf-16-le\"\n\t\t\tpasswordBuf = (unsigned char*)saved_key[index+i];\n\t\t\tpasswordBufSize = strlen16((UTF16*)passwordBuf) * 2;\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, passwordBuf, passwordBufSize);\n\t\t\tSHA256_Final(out[i], &ctx);\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, out[i], 32);\n\t\t\tSHA256_Final(out[i], &ctx);\n\t\t\t// run bitlocker kdf\n\t\t\tbitlocker_kdf(out[i], out[i]);\n\t\t\tlibcaes_crypt_ccm(out[i], 256, 0, cur_salt->iv, IVLEN, // 0 -> decrypt mode\n\t\t\t\t\tcur_salt->data, cur_salt->data_size,\n\t\t\t\t\toutput, cur_salt->data_size);\n\t\t\t// do known plaintext attack (kpa), version and\n\t\t\t// data_size checks come from libbde, v1 and v2 (vmk_blob)\n\t\t\t// checks come from e-ago\n\t\t\tversion = output[20] | (output[21] << 8);\n\t\t\tdata_size = output[16] | (output[17] << 8);\n\t\t\tvmk_blob = &output[16]; // the actual volume master key is at offset 28\n\t\t\tv1 = vmk_blob[8];\n\t\t\tv2 = vmk_blob[9];\n\t\t\tif (version == 1 && data_size == 0x2c && v1 <= 0x05 && v2 == 0x20)\n\t\t\t\tcracked[index+i] = 1;\n\t\t\telse {\n\t\t\t\tcracked[index+i] = 0;\n#ifdef BITLOCKER_DEBUG\n\t\t\t\tprint_hex(output, cur_salt->data_size);\n\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawKeccak_512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tKeccak_HashInstance hash;\n\t\tKeccak_HashInitialize(&hash, 576, 1024, 512, 0x01);\n\t\tKeccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);\n\t\tKeccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/AzureAD_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt) {\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\t// * PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32)\n\t\t// Trivial for now. 
Can optimized later.\n\t\tUTF16 Buf[PLAINTEXT_LENGTH+1];\n\t\tunsigned char hash[16], hex[33];\n\t\tint len, cnt, i;\n\t\tMD4_CTX ctx;\n\n#ifdef SIMD_COEF_32\n\t\tint lens[NBKEYS];\n\t\tunsigned char *pin[NBKEYS];\n\t\tunion {\n\t\t\tuint32_t *pout[NBKEYS];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tcnt = NBKEYS;\n#else\n\t\tcnt = 1;\n\n\t\tif (dirty)\n\t\tfor (i = 0; i < cnt; ++i) {\n\t\t\tlen = enc_to_utf16(Buf, PLAINTEXT_LENGTH, (UTF8*)saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\tif (len < 0) len = 0;\n\t\t\tMD4_Init(&ctx);\n\t\t\tMD4_Update(&ctx, Buf, len*2);\n\t\t\tMD4_Final(hash, &ctx);\n\t\t\tbase64_convert(hash, e_b64_raw, 16, hex, e_b64_hex, sizeof(hex), flg_Base64_HEX_UPCASE, 0);\n\t\t\tfor (len = 0; len < 32; ++len)\n\t\t\t\tsaved_nt[index+i][len<<1] = hex[len];\n\t\t}\n#ifdef SIMD_COEF_32\n\t\tfor (i = 0; i < NBKEYS; ++i) {\n\t\t\tlens[i] = 64;\n\t\t\tpin[i] = (unsigned char*)saved_nt[i+index];\n\t\t\tx.pout[i] = crypt_out[i+index];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, &(x.poutc), 32, 0);\n#else\n\t\tpbkdf2_sha256((unsigned char *)saved_nt[index], 64,\n\t\t\tAzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len,\n\t\t\tAzureAD_cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/adxcrypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tadxcrypt(saved_key[index], (unsigned char*)crypt_out[index], strlen(saved_key[index]));\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/vtp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\n\t\t// space for (secret + SUMMARY ADVERTISEMENT + VLANS DATA + secret)\n\n\t\t// derive and append \"secret\", but do it only the FIRST time for a password (not for extra salts).\n\t\tif (dirty)\n\t\t\tvtp_secret_derive(saved_key[index], saved_len[index], secret[index]);\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, secret[index], 16);\n\n\t\t// append vtp_summary_packet\n\t\tMD5_Update(&ctx, &cur_salt->vsp, sizeof(vtp_summary_packet));\n\n\t\t// add trailing bytes (for VTP version >= 2)\n\t\tif (cur_salt->version != 1)\n\t\t\tMD5_Update(&ctx, cur_salt->trailer_data, cur_salt->trailer_length);\n\n\t\t// append vlans_data\n\t\tMD5_Update(&ctx, cur_salt->vlans_data, cur_salt->vlans_data_length);\n\n\t\t// append \"secret\" again\n\t\tMD5_Update(&ctx, secret[index], 16);\n\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/net_sha1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ur_salt->magic != MAGIC) {\n\t\treturn pDynamicFmt->methods.crypt_all(pcount, salt);\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA_CTX ctx;\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, cur_salt->salt, cur_salt->length);\n\t\tSHA1_Update(&ctx, saved_key[index], 
PLAINTEXT_LENGTH);\n\t\tSHA1_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2-hmac-md4_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#if SIMD_COEF_32\n\t\tint lens[SSE_GROUP_SZ_MD4], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_MD4];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_MD4];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_MD4; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_md4_sse((const unsigned char **)pin, lens,\n\t\t (unsigned char*)cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, &(x.poutc),\n\t\t PBKDF2_MDx_BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_md4((unsigned char*)(saved_key[index]),\n\t\t strlen(saved_key[index]),\n\t\t (unsigned char*)cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, (unsigned char*)crypt_out[index],\n\t\t PBKDF2_MDx_BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dmg_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ount = *pcount;\n\tint index;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)\n\t{\n\t\thash_plugin_check_hash(index);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_DES_bs_h_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ng %d per-salt kernels, one dot per three salts done: \", num_salts);\n\n#if _OPENMP && PARALLEL_BUILD\nfor (i = 0; i < num_salts; i++) {\n\t\t\tinit_kernel(salt_list[i], gpu_id, 1, 0, forced_global_keys ? 
0 :local_work_size);\n\n#if _OPENMP && PARALLEL_BUILD\n\t\t\tif (omp_get_thread_num() == 0)\n\n\t\t\t{\n\t\t\t\topencl_process_event();\n\t\t\t}\n\t\t\tif (num_salts > 10 && (i % 3) == 2 && !ocl_any_test_running && john_main_process)\n\t\t\t\tfprintf(stderr, \".\");\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5_tgs_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char K3[16];\n#ifdef _MSC_VER\n\t\tunsigned char ddata[65536];\n#else\n\t\tunsigned char ddata[cur_salt->edata2len + 1];\n\n\t\tunsigned char checksum[16];\n\t\tRC4_KEY rckey;\n\n\t\tif (new_keys) {\n\t\t\tconst unsigned char data[4] = {2, 0, 0, 0};\n\n\t\t\tMD4_CTX ctx;\n\t\t\tunsigned char key[16];\n\t\t\tUTF16 wkey[PLAINTEXT_LENGTH + 1];\n\t\t\tint len;\n\n\t\t\tlen = enc_to_utf16(wkey, PLAINTEXT_LENGTH,\n\t\t\t\t\t(UTF8*)saved_key[index],\n\t\t\t\t\tstrlen(saved_key[index]));\n\t\t\tif (len <= 0) {\n\t\t\t\tsaved_key[index][-len] = 0;\n\t\t\t\tlen = strlen16(wkey);\n\t\t\t}\n\n\t\t\tMD4_Init(&ctx);\n\t\t\tMD4_Update(&ctx, (char*)wkey, 2 * len);\n\t\t\tMD4_Final(key, &ctx);\n\n\t\t\thmac_md5(key, data, 4, saved_K1[index]);\n\t\t}\n\n\t\thmac_md5(saved_K1[index], cur_salt->edata1, 16, K3);\n\n\t\tRC4_set_key(&rckey, 16, K3);\n\t\tRC4(&rckey, 32, cur_salt->edata2, ddata);\n\n\t\t /*\n\t\t\t8 first bytes are nonce, then ASN1 structures\n\t\t\t(DER encoding: type-length-data)\n\n\t\t\tif length >= 128 bytes:\n\t\t\t\tlength is on 2 bytes and type is\n\t\t\t\t\\x63\\x82 (encode_krb5_enc_tkt_part)\n\t\t\t\tand data is an ASN1 sequence \\x30\\x82\n\t\t\telse:\n\t\t\t\tlength is on 1 byte and type is \\x63\\x81\n\t\t\t\tand data is an ASN1 sequence \\x30\\x81\n\n\t\t\tnext headers follow the same ASN1 \"type-length-data\" scheme\n\t\t */\n\n\t\tif (((!memcmp(ddata + 8, \"\\x63\\x82\", 2)) && (!memcmp(ddata + 16, \"\\xA0\\x07\\x03\\x05\", 4)))\n\t\t\t||\n\t\t\t((!memcmp(ddata + 8, \"\\x63\\x81\", 2)) && (!memcmp(ddata + 16, \"\\x03\\x05\\x00\", 3)))) {\n\n\t\t\t/* Early-reject passed, verify checksum */\n\t\t\tRC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);\n\t\t\thmac_md5(saved_K1[index], ddata, cur_salt->edata2len, checksum);\n\n\t\t\tif (!memcmp(checksum, cur_salt->edata1, 16)) {\n\t\t\t\tcracked[index] = 1;\n\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2-hmac-sha512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA512\n\t\tint lens[SSE_GROUP_SZ_SHA512], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA512];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA512];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), 
PBKDF2_SHA512_BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),\n\t\t\tcur_salt->salt, cur_salt->length,\n\t\t\tcur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA512_BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/FG2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)", "context_chars": 100, "text": "ruct db_salt *salt)\n{\n\tint count = *pcount;\n\tint i = 0;\n\tchar *cp = FORTINET_MAGIC;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tSHA256_CTX ctx;\n\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n\n\t\tSHA256_Update(&ctx, saved_key[i], saved_key_len[i]);\n\t\tSHA256_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);\n\t\tSHA256_Final((unsigned char*)crypt_key[i], &ctx);\n\t} #pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_rar_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "LERROR(clFinish(queue[gpu_id]), \"failed in clFinish\");\n\t\tWAIT_UPDATE\n\t\tWAIT_DONE\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (output[index].key.w[4])\n\t\t\tcheck_rar(file, index, output[index].key.c, output[index].iv);\n\t\telse\n\t\t\tcracked[index] = 0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sunmd5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \"const int count\" we'd work around by\n * simply dropping the const from there.)\n */\n#ifdef _OPENMP\nfor (group_idx = 0; group_idx < ngroups; ++group_idx) {\n\t\tint roundasciilen;\n\t\tint round, maxrounds = BASIC_ROUND_COUNT + getrounds(saved_salt);\n\t\tchar roundascii[8];\n\n\t\tint idx_begin = group_idx * group_sz;\n\t\tint idx_end = idx_begin + group_sz > count ?\n\t\t\tcount : idx_begin + group_sz;\n\n#ifdef SIMD_COEF_32\n\t\tint i, j, zs, zb, zs0, zb0;\n\t\tint bigs[MAX_KEYS_PER_CRYPT], smalls[MAX_KEYS_PER_CRYPT];\n\t\tint nbig, nsmall;\n\t\t// int zb2; // used in debugging\n\t\tmemset(input_buf[group_idx], 0, BLK_CNT*MD5_CBLOCK);\n\n\n\t\t/*\n\t\t * now to delay high-speed md5 implementations that have stuff\n\t\t * like code inlining, loops unrolled and table lookup\n\t\t */\n\n\t\t/* this is the 'first' sprintf(roundascii,\"%d\",round); The rest are at the bottom of the loop */\n\t\t/* some compilers dont allow strcpy inside OMP block with default(none) used */\n\t\t//strcpy(roundascii, \"0\");\n\t\troundascii[0] = '0';\n\t\troundascii[1] = 0;\n\n\t\troundasciilen=1;\n\n\t\tfor (round = 0; round < maxrounds; round++) {\n#ifdef SIMD_COEF_32\n\t\t\tnbig = nsmall = 0;\n\n\n\t\t\t/*\n\t\t\t * now this is computed at bottom of loop (we start properly set at \"0\", len==1)\n\t\t\t * ** code replaced**\n\t\t\t * roundasciilen = sprintf(roundascii, \"%d\", round);\n\t\t\t */\n\t\t\tunsigned int idx;\n\t\t\tfor (idx = idx_begin; idx < idx_end; ++idx) {\n\t\t\t\tpConx px = &data[idx];\n\n\t\t\t\tint indirect_a =\n\t\t\t\t\tmd5bit((unsigned char*)px->digest, round) ?\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 1, 4, 0) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 2, 5, 1) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 3, 6, 2) 
|\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 4, 7, 3) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 5, 8, 4) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 6, 9, 5) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 7, 10, 6)\n\t\t\t\t\t:\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 0, 3, 0) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 1, 4, 1) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 2, 5, 2) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 3, 6, 3) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 4, 7, 4) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 5, 8, 5) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 6, 9, 6);\n\n\t\t\t\tint indirect_b =\n\t\t\t\t\tmd5bit((unsigned char*)px->digest, round + 64) ?\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 9, 12, 0) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 10, 13, 1) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 11, 14, 2) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 12, 15, 3) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 13, 0, 4) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 14, 1, 5) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 15, 2, 6)\n\t\t\t\t\t:\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 8, 11, 0) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 9, 12, 1) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 10, 13, 2) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 11, 14, 3) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 12, 15, 4) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 13, 0, 5) |\n\t\t\t\t\tcoin_step((unsigned char*)px->digest, 14, 1, 6);\n\n\t\t\t\tint bit = md5bit((unsigned char*)px->digest, indirect_a) ^ md5bit((unsigned char*)px->digest, indirect_b);\n\n\t\t\t\t/* xor a coin-toss; if true, mix-in the constant phrase */\n\n#ifndef SIMD_COEF_32\n\t\t\t\t/*\n\t\t\t\t * This is the real 'crypt'. Pretty trival, but there are 2 possible sizes\n\t\t\t\t * there is a 1 block crypte, and a 25 block crypt. They are chosen based\n\t\t\t\t * upon the 'long' coin flip algorithm above.\n\t\t\t\t */\n\n\t\t\t\t/* re-initialise the context */\n\t\t\t\tMD5_Init(&px->context);\n\n\t\t\t\t/* update with the previous digest */\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)px->digest, sizeof(px->digest));\n\n\t\t\t\t/* optional, add a constant string. This is what makes the 'long' crypt loops */\n\t\t\t\tif (bit)\n\t\t\t\t\tMD5_Update(&px->context, (unsigned char*)constant_phrase, constant_phrase_size);\n\t\t\t\t/* Add a decimal current roundcount */\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)roundascii, roundasciilen);\n\t\t\t\tMD5_Final((unsigned char*)px->digest, &px->context);\n#else\n\t\t\t\t/*\n\t\t\t\t * we do not actually perform the work here. We run through all of the\n\t\t\t\t * keys we are working on, and figure out which ones need 'small' buffers\n\t\t\t\t * and which ones need large buffers. Then we can group them SIMD_COEF_32*SIMD_PARA_MD5\n\t\t\t\t * at a time, later in the process.\n\t\t\t\t */\n\t\t\t\tif (bit)\n\t\t\t\t\tbigs[nbig++] = idx;\n\t\t\t\telse\n\t\t\t\t\tsmalls[nsmall++] = idx;\n\n\n\t\t\t}\n#ifdef SIMD_COEF_32\n\t\t\t/*\n\t\t\t * ok, at this time we know what group each element is in. Either a large\n\t\t\t * crypt, or small one. Now group our crypts up based upon the crypt size\n\t\t\t * doing COEF*PARA at a time, until we have 2 'partial' buffers left. We\n\t\t\t * 'waste' some CPU in them, but that is what happens. 
If there is only 1 or\n\t\t\t * or 2, we may even drop back and use oSSL, it may be faster than an entire\n\t\t\t * SSE crypt. We will have to time test, and find where the cut over point is\n\t\t\t * but likely it will NOT be 0. The cuttover appears to be 1, meaning that 0,\n\t\t\t * only a 1 limb PARA buffer will not be done (and will fall back to oSSL). This\n\t\t\t * was for PARA==3 on 32 bit. A much BIGGER difference was in the MAX_KEYS_PER_CRYPT\n\t\t\t * increasing this does make for more speed, HOWEVER, it also makes for more lost time\n\t\t\t * if the run is stopped, since ALL of the words in the keys buffer would have to be\n\t\t\t * redone again (hopefully only redone over the candidates left to test in the input file).\n\t\t\t * The choice to use 512 MAX_KEYS seems about right.\n\t\t\t */\n\n\t\t\t/********************************************/\n\t\t\t/* get the little ones out of the way first */\n\t\t\t/********************************************/\n\n\t\t\t/* first, put the length text, 0x80, and buffer length into the buffer 1 time, not in the loop */\n\t\t\tfor (j = 0; j < BLK_CNT; ++j) {\n\t\t\t\tunsigned char *cpo = &((unsigned char*)input_buf[group_idx])[PARAGETPOS(0, j)];\n\t\t\t\tint k;\n\t\t\t\tfor (k = 0; k < roundasciilen; ++k) {\n\t\t\t\t\tcpo[GETPOS0(k+16)] = roundascii[k];\n\t\t\t\t}\n\t\t\t\tcpo[GETPOS0(k+16)] = 0x80;\n\t\t\t\t((uint32_t*)cpo)[14 * SIMD_COEF_32]=((16+roundasciilen)<<3);\n\t\t\t}\n\t\t\t/* now do the 'loop' for the small 1-limb blocks. */\n\t\t\tzs = zs0 = zb = zb0 = 0;\n\t\t\t// zb2 = 0; /* for debugging */\n\t\t\tfor (i = 0; i < nsmall-MIN_DROP_BACK; i += BLK_CNT) {\n\t\t\t\tfor (j = 0; j < BLK_CNT && zs < nsmall; ++j) {\n\t\t\t\t\tpConx px = &data[smalls[zs++]];\n\t\t\t\t\tuint32_t *pi = px->digest;\n\t\t\t\t\tuint32_t *po = (uint32_t*)&((unsigned char*)input_buf[group_idx])[PARAGETPOS(0, j)];\n\t\t\t\t\t/*\n\t\t\t\t\t * digest is flat, input buf is SSE_COEF.\n\t\t\t\t\t * input_buf is po (output) here, we are writing to it.\n\t\t\t\t\t */\n\t\t\t\t\tpo[0] = pi[0];\n\t\t\t\t\tpo[COEF] = pi[1];\n\t\t\t\t\tpo[COEF+COEF] = pi[2];\n\t\t\t\t\tpo[COEF+COEF+COEF] = pi[3];\n\t\t\t\t}\n\t\t\t\tSIMDmd5body(input_buf[group_idx], out_buf[group_idx], NULL, SSEi_MIXED_IN);\n\t\t\t\t/*\n\t\t\t\t * we convert from COEF back to flat. since this data will later be used\n\t\t\t\t * in non linear order, there is no gain trying to keep it in COEF order\n\t\t\t\t */\n\t\t\t\tfor (j = 0; j < BLK_CNT && zs0 < nsmall; ++j) {\n\t\t\t\t\tuint32_t *pi, *po;\n\t\t\t\t\tpConx px = &data[smalls[zs0++]];\n\t\t\t\t\tpi = (uint32_t*)&((unsigned char*)out_buf[group_idx])[PARAGETOUTPOS(0, j)];\n\t\t\t\t\tpo = px->digest;\n\t\t\t\t\tpo[0] = pi[0];\n\t\t\t\t\tpo[1] = pi[COEF];\n\t\t\t\t\tpo[2] = pi[COEF+COEF];\n\t\t\t\t\tpo[3] = pi[COEF+COEF+COEF];\n\t\t\t\t}\n\t\t\t}\n\t\t\t/* this catches any left over small's, and simply uses oSSL */\n\t\t\twhile (zs < nsmall) {\n\t\t\t\tpConx px = &data[smalls[zs++]];\n\t\t\t\tMD5_Init(&px->context);\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)px->digest, sizeof(px->digest));\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)roundascii, roundasciilen);\n\t\t\t\tMD5_Final((unsigned char*)px->digest, &px->context);\n\t\t\t}\n\t\t\t/*****************************************************************************\n\t\t\t * Now do the big ones. These are more complex that the little ones\n\t\t\t * (much more complex actually). 
Here, we have to insert the prior crypt\n\t\t\t * into the first 16 bytes (just like in the little ones, but then we have\n\t\t\t * our buffer 'pre-loaded' with a 1517 byte string. we append the text number\n\t\t\t * after the null byte of that 1517 byte string, then put on the 0x80, and\n\t\t\t * then put the bit length. NOTE, that this actually is an array of 25\n\t\t\t * SSE_PARA buffer blocks, so there is quite a bit more manipluation of where\n\t\t\t * in the buffer to write this. This is most noted in the text number, where\n\t\t\t * it spills over from buffer 24 to 25.\n\t\t\t *****************************************************************************/\n\n\t\t\t/* first, put the length text, 0x80, and buffer length into the buffer 1 time, not in the loop */\n\t\t\tfor (j = 0; j < BLK_CNT; ++j) {\n\t\t\t\tunsigned char *cpo23 = &((unsigned char*)input_buf_big[group_idx][23])[PARAGETPOS(0, j)];\n\t\t\t\tunsigned char *cpo24 = &((unsigned char*)input_buf_big[group_idx][24])[PARAGETPOS(0, j)];\n\t\t\t\tuint32_t *po24 = (uint32_t*)cpo24;\n\t\t\t\t*po24 = 0; /* key clean */\n\t\t\t\tcpo23[GETPOS0(61)] = roundascii[0];\n\t\t\t\tswitch(roundasciilen) {\n\t\t\t\t\tcase 1:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = 0x80;\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = 0; /* key clean. */\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 2:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = roundascii[1];\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = 0x80;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 3:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = roundascii[1];\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = roundascii[2];\n\t\t\t\t\t\tcpo24[0] = 0x80;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 4:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = roundascii[1];\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = roundascii[2];\n\t\t\t\t\t\tcpo24[0] = roundascii[3];\n\t\t\t\t\t\tcpo24[1] = 0x80;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 5:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = roundascii[1];\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = roundascii[2];\n\t\t\t\t\t\tcpo24[0] = roundascii[3];\n\t\t\t\t\t\tcpo24[1] = roundascii[4];\n\t\t\t\t\t\tcpo24[2] = 0x80;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 6:\n\t\t\t\t\t\tcpo23[GETPOS0(62)] = roundascii[1];\n\t\t\t\t\t\tcpo23[GETPOS0(63)] = roundascii[2];\n\t\t\t\t\t\tcpo24[0] = roundascii[3];\n\t\t\t\t\t\tcpo24[1] = roundascii[4];\n\t\t\t\t\t\tcpo24[2] = roundascii[5];\n\t\t\t\t\t\tcpo24[3] = 0x80;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tpo24[14*SIMD_COEF_32]=((16+constant_phrase_size+roundasciilen)<<3);\n\t\t\t}\n\t\t\tfor (i = 0; i < nbig-MIN_DROP_BACK; i += BLK_CNT) {\n\t\t\t\tfor (j = 0; j < BLK_CNT && zb < nbig; ++j) {\n\t\t\t\t\tpConx px = &data[bigs[zb++]];\n\t\t\t\t\tuint32_t *pi = px->digest;\n\t\t\t\t\tuint32_t *po = (uint32_t*)&((unsigned char*)input_buf_big[group_idx][0])[PARAGETPOS(0, j)];\n\t\t\t\t\t/*\n\t\t\t\t\t * digest is flat, input buf is SSE_COEF.\n\t\t\t\t\t * input_buf is po (output) here, we are writing to it.\n\t\t\t\t\t */\n\t\t\t\t\tpo[0] = pi[0];\n\t\t\t\t\tpo[COEF] = pi[1];\n\t\t\t\t\tpo[COEF+COEF] = pi[2];\n\t\t\t\t\tpo[COEF+COEF+COEF] = pi[3];\n\t\t\t\t}\n\t\t\t\tSIMDmd5body(input_buf_big[group_idx][0], out_buf[group_idx], NULL, SSEi_MIXED_IN);\n\t\t\t\tfor (j = 1; j < 25; ++j)\n\t\t\t\t\tSIMDmd5body(input_buf_big[group_idx][j], out_buf[group_idx], out_buf[group_idx], SSEi_RELOAD|SSEi_MIXED_IN);\n\n\t\t\t\tfor (j = 0; j < BLK_CNT && zb0 < nbig; ++j) {\n\t\t\t\t\tuint32_t *pi, *po;\n\t\t\t\t\tpConx px = &data[bigs[zb0++]];\n\t\t\t\t\tpi = (uint32_t*)&((unsigned char*)out_buf[group_idx])[PARAGETOUTPOS(0, j)];\n\t\t\t\t\tpo = px->digest;\n\t\t\t\t\tpo[0] = pi[0];\n\t\t\t\t\tpo[1] = 
pi[COEF];\n\t\t\t\t\tpo[2] = pi[COEF+COEF];\n\t\t\t\t\tpo[3] = pi[COEF+COEF+COEF];\n\t\t\t\t}\n\t\t\t}\n\t\t\t/* this catches any left overs, and simply uses oSSL */\n\t\t\twhile (zb < nbig) {\n\t\t\t\tpConx px = &data[bigs[zb++]];\n\t\t\t\tMD5_Init(&px->context);\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)px->digest, sizeof(px->digest));\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)constant_phrase, constant_phrase_size);\n\t\t\t\tMD5_Update(&px->context, (unsigned char*)roundascii, roundasciilen);\n\t\t\t\tMD5_Final((unsigned char*)px->digest, &px->context);\n\t\t\t}\n\n\t\t\t/*\n\t\t\t * this is the equivalent of the original code:\n\t\t\t * roundasciilen = sprintf(roundascii, \"%d\", round);\n\t\t\t * that was at the top of this rounds loop. We have moved\n\t\t\t * it to the bottom. It does compute one 'extra' value that\n\t\t\t * is never used (5001), but it is faster, and that one\n\t\t\t * extra value causes no harm.\n\t\t\t * we do call the sprintf a few times (at 10, 100, 1000, etc)\n\t\t\t * but we only call it there.\n\t\t\t */\n\n\t\t\tif (++roundascii[roundasciilen-1] == '9'+1) {\n\t\t\t\tint j = roundasciilen-1;\n\t\t\t\tif (j > 0) {\n\t\t\t\t\tdo {\n\t\t\t\t\t\troundascii[j] = '0';\n\t\t\t\t\t\t++roundascii[--j];\n\t\t\t\t\t} while (j > 0 && roundascii[j] == '9'+1);\n\t\t\t\t}\n\t\t\t\tif (!j && roundascii[0] == '9'+1) {\n\t\t\t\t\t/* some compilers dont allow sprintf inside OMP block */\n\t\t\t\t\t//roundasciilen = sprintf(roundascii, \"%d\", round+1);\n\t\t\t\t\troundascii[0] = '1';\n\t\t\t\t\troundascii[roundasciilen++] = '0';\n\t\t\t\t\troundascii[roundasciilen] = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rvary_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (dirty) {\n\t\t\tDES_cblock des_key;\n\t\t\tint i;\n\n\t\t\t/* process key */\n\t\t\tfor (i = 0; saved_key[index][i]; i++)\n\t\t\t\tdes_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];\n\n\t\t\t/* replace missing characters in userid by (EBCDIC space (0x40) XOR 0x55) << 1 */\n\t\t\twhile (i < 8)\n\t\t\t\tdes_key[i++] = 0x2a;\n\n\t\t\tDES_set_key_unchecked(&des_key, &schedules[index]);\n\t\t}\n\t\t/* DES encrypt the password with the password itself by using the key as salt */\n\t\tchar key[10];\n\t\tstrnzcpy(key, saved_key[index], 9);\n\t\tascii2ebcdic((unsigned char *)key);\n\t\tebcdic_padding((unsigned char *)key);\n\t\tDES_ecb_encrypt((const_DES_cblock*)key, (DES_cblock*)crypt_out[index], &schedules[index], DES_ENCRYPT);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/encfs_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tint i, j;\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][MAX_KEYLENGTH + MAX_IVLENGTH];\n\t\tunsigned char tmpBuf[sizeof(cur_salt->data)];\n\t\tunsigned int checksum = 0;\n\t\tunsigned int checksum2 = 0;\n\t\tunsigned char out[MIN_KEYS_PER_CRYPT][MAX_KEYLENGTH + MAX_IVLENGTH];\n\n#ifdef SIMD_COEF_32\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], 
*pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = out[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, cur_salt->saltLen, cur_salt->iterations, pout, cur_salt->keySize + cur_salt->ivLength, 0);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tmemcpy(master[i], out[i], cur_salt->keySize + cur_salt->ivLength);\n#else\n\t\tpbkdf2_sha1((const unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltLen, cur_salt->iterations, out[0], cur_salt->keySize + cur_salt->ivLength, 0);\n\t\tmemcpy(master[0], out[0], cur_salt->keySize + cur_salt->ivLength);\n\n\t\tfor (j = 0; j < MIN_KEYS_PER_CRYPT; ++j) {\n\t\t\t// First N bytes are checksum bytes.\n\t\t\tfor (i=0; i<KEY_CHECKSUM_BYTES; ++i)\n\t\t\t\tchecksum = (checksum << 8) | (unsigned int)cur_salt->data[i];\n\t\t\tmemcpy( tmpBuf, cur_salt->data+KEY_CHECKSUM_BYTES, cur_salt->keySize + cur_salt->ivLength );\n\t\t\tencfs_common_streamDecode(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength ,checksum, master[j]);\n\t\t\tchecksum2 = encfs_common_MAC_32(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength, master[j]);\n\t\t\tif (checksum2 == checksum) {\n\t\t\t\tcracked[index+j] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_dashlane_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " NULL, multi_profilingEvent[4]), \"Copy result back\");\n\n\tif (!ocl_autotune_running) {\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t\tif (dashlane_verify(cur_salt, (unsigned char*)output[index].dk)) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_7z_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ULL, multi_profilingEvent[5]),\n\t\t\"Copy result back\");\n\n\tif (!ocl_autotune_running) {\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t\tsevenzip_hash *derived = (sevenzip_hash*)&outbuffer[index];\n\n\t\t\tif (derived->reject && (sevenzip_trust_padding || sevenzip_salt->type == 0x80))\n\t\t\t\tcontinue;\n\n\t\t\t/* decrypt and check */\n\t\t\tif ((cracked[index] = sevenzip_decrypt((uint8_t*)derived->key)))\n\t\t\t{\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sap_pse_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char key[MIN_KEYS_PER_CRYPT][24];\n\t\tunsigned char iv[MIN_KEYS_PER_CRYPT][8];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tsize_t lens[MIN_KEYS_PER_CRYPT];\n\t\tsize_t clens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT], *iout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = saved_len[i+index];\n\t\t\tclens[i] = saved_len[i+index];\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = 
key[i];\n\t\t\tiout[i] = iv[i];\n\t\t}\n\t\tpkcs12_pbe_derive_key_simd_sha1(\n\t\t\t\tcur_salt->iterations,\n\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY, (const unsigned char **)pin, lens,\n\t\t\t\tcur_salt->salt, cur_salt->salt_size, pout, 24);\n\n\t\tpkcs12_pbe_derive_key_simd_sha1(\n\t\t\t\tcur_salt->iterations,\n\t\t\t\tMBEDTLS_PKCS12_DERIVE_IV, (const unsigned char **)pin, clens,\n\t\t\t\tcur_salt->salt, cur_salt->salt_size, iout, 8);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t// derive key\n\t\t\tpkcs12_pbe_derive_key(1, cur_salt->iterations,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index+i],\n\t\t\t\t\tsaved_len[index+i], cur_salt->salt,\n\t\t\t\t\tcur_salt->salt_size, key[i], 24);\n\t\t\t// derive iv\n\t\t\tpkcs12_pbe_derive_key(1, cur_salt->iterations,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_IV,\n\t\t\t\t\t(unsigned char*)saved_key[index+i],\n\t\t\t\t\tsaved_len[index+i], cur_salt->salt,\n\t\t\t\t\tcur_salt->salt_size, iv[i], 8);\n\t\t}\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tunsigned char out[16];\n\t\t\tunsigned char input[PLAINTEXT_LENGTH + 8];\n\t\t\tint padbyte;\n\t\t\tDES_cblock ivec;\n\t\t\tDES_key_schedule ks1, ks2, ks3;\n\n\t\t\t// pin encryption\n\t\t\tDES_set_key_unchecked((DES_cblock *) key[i], &ks1);\n\t\t\tDES_set_key_unchecked((DES_cblock *) (key[i]+8), &ks2);\n\t\t\tDES_set_key_unchecked((DES_cblock *) (key[i]+16), &ks3);\n\t\t\tmemcpy(ivec, iv[i], 8);\n\t\t\tmemcpy(input, saved_key[index+i], saved_len[index+i]);\n\t\t\tpadbyte = 8 - (saved_len[index+i] % 8);\n\t\t\tif (padbyte < 8 && padbyte > 0)\n\t\t\t\tmemset(input + saved_len[index+i], padbyte, padbyte);\n\t\t\tDES_ede3_cbc_encrypt(input, out, 8, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT); // is a 8 bytes verifier enough?\n\n\t\t\tcracked[index+i] = !memcmp(out, cur_salt->encrypted_pin, 8);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mongodb_scram_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tint index;\n\tconst int count = *pcount;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#if !defined (SIMD_COEF_32)\n\t\tSHA_CTX ctx;\n\t\tMD5_CTX mctx;\n\t\tunsigned char hexhash[32];\n\t\tunsigned char hash[16];\n\t\tunsigned char out[BINARY_SIZE];\n\n\t\tMD5_Init(&mctx);\n\t\tMD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));\n\t\tMD5_Update(&mctx, \":mongo:\", 7);\n\t\tMD5_Update(&mctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Final(hash, &mctx);\n\t\thex_encode(hash, 16, hexhash);\n\n\t\tpbkdf2_sha1(hexhash, 32, cur_salt->salt, 16,\n\t\t\t\tcur_salt->iterations, out, BINARY_SIZE, 0);\n\n\t\thmac_sha1(out, BINARY_SIZE, (unsigned char*)\"Client Key\", 10, out, BINARY_SIZE);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, out, BINARY_SIZE);\n\t\tSHA1_Final((unsigned char*)crypt_out[index], &ctx);\n#else\n\t\tSHA_CTX ctx;\n\t\tMD5_CTX mctx;\n\t\tint i;\n\t\tunsigned char hexhash_[SIMD_KEYS][32], *hexhash[SIMD_KEYS];\n\t\tunsigned char hash[16];\n\t\tint lens[SIMD_KEYS];\n\t\tunsigned char out_[SIMD_KEYS][BINARY_SIZE], *out[SIMD_KEYS];\n\n\t\tfor (i = 0; i < SIMD_KEYS; ++i) {\n\t\t\tMD5_Init(&mctx);\n\t\t\tMD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));\n\t\t\tMD5_Update(&mctx, \":mongo:\", 7);\n\t\t\tMD5_Update(&mctx, saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\tMD5_Final(hash, 
&mctx);\n\t\t\thexhash[i] = hexhash_[i];\n\t\t\thex_encode(hash, 16, hexhash[i]);\n\t\t\tlens[i] = 32;\n\t\t\tout[i] = out_[i];\n\t\t}\n\n\t\tpbkdf2_sha1_sse((const unsigned char **)hexhash, lens, cur_salt->salt, 16,\n\t\t\t\tcur_salt->iterations, out, BINARY_SIZE, 0);\n\n\t\tfor (i = 0; i < SIMD_KEYS; ++i) {\n\t\t\thmac_sha1(out[i], BINARY_SIZE, (unsigned char*)\"Client Key\", 10, out[i], BINARY_SIZE);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, out[i], BINARY_SIZE);\n\t\t\tSHA1_Final((unsigned char*)crypt_out[index+i], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/vms_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "VMS_std_crypt(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tuaf_test_password (cur_salt, saved_key[index], 0, crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/nt2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nst unsigned int count =\n\t\t(*pcount + MIN_KEYS_PER_CRYPT - 1) / MIN_KEYS_PER_CRYPT;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n#ifdef SIMD_COEF_32\n\n\t\tSIMDmd4body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*DIGEST_SIZE], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);\n\n#else\n\t\tMD4_CTX ctx;\n\n\t\tMD4_Init( &ctx );\n\t\tMD4_Update(&ctx, (unsigned char*)saved_key[i], saved_len[i]);\n\t\tMD4_Final((unsigned char*) crypt_key[i], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/keystore_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "x = 0; index < count; ++index)\n\t\tMixOrder[index] = index;\n\ttot_todo = count;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += MIN_KEYS_PER_CRYPT) {\n\t\tSHA_CTX ctx;\n#ifdef SIMD_COEF_32\n\t\tint x, tid=0, len, idx;\n\t\tchar tmp_sse_out[20*MIN_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];\n\t\tuint32_t *sse_out;\n\t\tsse_out = (uint32_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);\n#ifdef _OPENMP\n\t\ttid = omp_get_thread_num();\n\n\t\tlen = saved_len[MixOrder[index]];\n\t\tif (len >= 4 && len <= 24) {\n\t\t\tunsigned char *po;\n\t\t\tpo = (unsigned char*)cursimd->first_blk[tid][len-4];\n\t\t\tfor (x = 0; x < MIN_KEYS_PER_CRYPT; ++x) {\n\t\t\t\tint j;\n\t\t\t\tunsigned char *p;\n\t\t\t\tidx = MixOrder[index+x];\n\t\t\t\tp = (unsigned char*)saved_key[idx];\n\t\t\t\tfor (j = 0; j < len; ++j)\n\t\t\t\t\tpo[GETPOS(j*2+1,x)] = p[j];\n\t\t\t}\n\t\t\tSIMDSHA1body(po, sse_out, NULL, SSEi_MIXED_IN);\n\t\t\tpo = (unsigned char*)cursimd->ex_data[len-4];\n\t\t\tfor (x = 0; x < cursimd->n_ex[len-4]; ++x) {\n\t\t\t\tSIMDSHA1body(po, sse_out, sse_out, SSEi_MIXED_IN|SSEi_RELOAD);\n\t\t\t\tpo += 64*MIN_KEYS_PER_CRYPT;\n\t\t\t}\n#ifdef SIMD_COEF_32\n\t\t\t// we have to 'marshal' the data back into the SIMD output buf.\n\t\t\t// but we only marshal the first 4 bytes.\n\t\t\tfor (x = 0; x < MIN_KEYS_PER_CRYPT; ++x) {\n\t\t\t\tidx = MixOrder[index+x];\n\t\t\t\tif (idx < count)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\tcrypt_out[idx][0] = JOHNSWAP(sse_out[5*SIMD_COEF_32*(x/SIMD_COEF_32)+x%SIMD_COEF_32]);\n#else\n\t\t\t\t\tcrypt_out[idx][0] = sse_out[5*SIMD_COEF_32*(x/SIMD_COEF_32)+x%SIMD_COEF_32];\n\n\t\t\t}\n\n\n\t\t\t// we do NOT want to fall 
through. We handled this\n\t\t\t// SIMD block of data already.\n\t\t\tcontinue;\n\t\t}\n\n\n\t\tif (dirty)\n\t\t\tgetPreKeyedHash(MixOrder[index]);\n\t\tif (saved_len[MixOrder[index]] == 0)\n\t\t\tmemcpy(crypt_out[MixOrder[index]], keystore_cur_salt->data_hash, 20);\n\t\telse {\n\t\t\tmemcpy(&ctx, &saved_ctx[MixOrder[index]], sizeof(ctx));\n\t\t\tSHA1_Update(&ctx, keystore_cur_salt->data, keystore_cur_salt->data_length);\n\t\t\tSHA1_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/monero_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char km[64];\n\t\tunsigned char out[32];\n\t\tunsigned char iv[IVLEN];\n\t\tstruct chacha_ctx ckey;\n\n\t\t// 1\n\t\tmemcpy(iv, cur_salt->ct, IVLEN);\n\t\tcn_slow_hash(saved_key[index], saved_len[index], (char *)km);\n\t\tchacha_keysetup(&ckey, km, 256);\n\t\tchacha_ivsetup(&ckey, iv, NULL, IVLEN);\n\t\tchacha_decrypt_bytes(&ckey, cur_salt->ct + IVLEN + 2, out, 32, 20);\n\t\tif (memmem(out, 32, (void*)\"key_data\", 8) || memmem(out, 32, (void*)\"m_creation_timestamp\", 20)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// 2\n\t\tmemcpy(iv, cur_salt->ct, IVLEN);\n\t\tchacha_keysetup(&ckey, km, 256);\n\t\tchacha_ivsetup(&ckey, iv, NULL, IVLEN);\n\t\tchacha_decrypt_bytes(&ckey, cur_salt->ct + IVLEN + 2, out, 32, 8);\n\t\tif (memmem(out, 32, (void*)\"key_data\", 8) || memmem(out, 32, (void*)\"m_creation_timestamp\", 20)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/signal_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[16];\n\t\tint keylen = 16;\n\t\tunsigned char hash[20];\n\n\t\tpkcs12_pbe_derive_key(1, cur_salt->iterations,\n\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\tsaved_len[index], cur_salt->mac_salt,\n\t\t\t\tcur_salt->mac_salt_size, key, keylen);\n\t\thmac_sha1(key, keylen, cur_salt->master_secret, cur_salt->master_secret_size - 20, hash, 20);\n\t\tcracked[index] = !memcmp(hash, cur_salt->master_secret + cur_salt->master_secret_size - 20, 20);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dahua_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t// hash is compressor(md5(password))\n\t\tMD5_CTX ctx;\n\t\tunsigned char *out = (unsigned char*)crypt_out[index];\n\t\tunsigned char hash[16];\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tMD5_Final(hash, &ctx);\n\n\t\tcompressor(hash, out);\n\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/putty_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tcracked[index] = LAME_ssh2_load_userkey(saved_key[index]);\n\t\tif (cracked[index])\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rar_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "todo++] = index;\n\t\t}\n\t\twhile (tot_todo % NBKEYS)\n\t\t\tindices[tot_todo++] = count;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += NBKEYS) {\n\t\tunsigned int i, j, k;\n\t\tuint8_t (*RawPsw)[NBKEYS*64] = vec_in[index/NBKEYS];\n\t\tuint32_t *digest = vec_out[index/NBKEYS];\n\t\t// all passwords in one batch has the same length\n\t\tint pw_len = saved_len[indices[index]];\n\t\tint RawLength = pw_len + 8 + 3;\n\t\tint cur_len = 0;\n\t\tint fst_blk = 1;\n\t\tint cur_buf = 0;\n\t\tunsigned char tmp1 = 0, tmp2 = 0;\n\n\t\tfor (i = 0; i < ROUNDS; ++i) {\n\t\t\t// copy passwords to vector buffer\n\t\t\tfor (j = 0; j < NBKEYS; ++j) {\n\t\t\t\tint idx = indices[index + j];\n\t\t\t\tint len = cur_len;\n\t\t\t\tfor (k = 0; k < pw_len; ++k) {\n\t\t\t\t\tRawPsw[(len & 64)>>6][GETPOS(len%64, j)] =\n\t\t\t\t\t\tsaved_key[UNICODE_LENGTH*idx + k];\n\t\t\t\t\tlen++;\n\t\t\t\t}\n\t\t\t\tfor (k = 0; k < 8; ++k) {\n\t\t\t\t\tRawPsw[(len & 64)>>6][GETPOS(len%64, j)] = saved_salt[k];\n\t\t\t\t\tlen++;\n\t\t\t\t}\n\n\t\t\t\tRawPsw[(len & 64)>>6][GETPOS(len%64, j)] = (unsigned char)i;\n\t\t\t\tlen++;\n\t\t\t\tif ( ((unsigned char) i) == 0) {\n\t\t\t\t\ttmp1 = (unsigned char)(i >> 8);\n\t\t\t\t\ttmp2 = (unsigned char)(i >> 16);\n\t\t\t\t}\n\t\t\t\tRawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp1;\n\t\t\t\tlen++;\n\t\t\t\tRawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp2;\n\t\t\t}\n\t\t\tcur_len += RawLength;\n\n\t\t\tif (i % (ROUNDS / 16) == 0) {\n\t\t\t\tuint8_t *tempin = tmp_in[index/NBKEYS];\n\t\t\t\tuint32_t *tempout = tmp_out[index/NBKEYS];\n\t\t\t\tmemcpy(tempin, RawPsw[cur_buf], NBKEYS*64);\n\t\t\t\tfor (j = 0; j < NBKEYS; ++j) { // padding\n\t\t\t\t\tuint32_t *tail;\n\t\t\t\t\tfor (k = RawLength; k < 64; ++k)\n\t\t\t\t\t\ttempin[GETPOS(k, j)] = 0;\n\t\t\t\t\ttempin[GETPOS(RawLength, j)] = 0x80;\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\ttail = (uint32_t*)&tempin[GETPOS(64 - 1, j)];\n#else\n\t\t\t\t\ttail = (uint32_t*)&tempin[GETPOS(64 - 1 - 3, j)];\n\n\t\t\t\t\t*tail = cur_len*8;\n\t\t\t\t}\n\t\t\t\tif (i == 0)\n\t\t\t\t\tSIMDSHA1body(tempin, tempout, NULL, SSEi_MIXED_IN);\n\t\t\t\telse\n\t\t\t\t\tSIMDSHA1body(tempin, tempout, digest,\n\t\t\t\t\t SSEi_MIXED_IN | SSEi_RELOAD);\n\t\t\t\tfor (j = 0; j < NBKEYS; ++j) {\n\t\t\t\t\tint idx = indices[index + j];\n\t\t\t\t\taes_iv[idx*16 + i/(ROUNDS/16)] =\n\t\t\t\t\t\t(uint8_t)tempout[HASH_IDX(j) + 4*SIMD_COEF_32];\n\t\t\t\t}\n\t\t\t}\n\t\t\t// swap out and compute digests on the filled buffer\n\t\t\tif ((cur_len & 64) != (cur_buf << 6)) {\n\t\t\t\tif (fst_blk)\n\t\t\t\t\tSIMDSHA1body(RawPsw[cur_buf], digest, NULL, SSEi_MIXED_IN);\n\t\t\t\telse\n\t\t\t\t\tSIMDSHA1body(RawPsw[cur_buf], digest, digest,\n\t\t\t\t\t SSEi_MIXED_IN | SSEi_RELOAD);\n\t\t\t\tfst_blk = 0;\n\t\t\t\tcur_buf = 1 - cur_buf;\n\t\t\t}\n\t\t}\n\t\t// padding\n\t\tmemset(RawPsw[0], 0, 
sizeof(RawPsw[0]));\n\t\tfor (j = 0; j < NBKEYS; ++j) {\n\t\t\tuint32_t *tail;\n\t\t\tRawPsw[0][GETPOS(0, j)] = 0x80;\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\ttail = (uint32_t*)&RawPsw[0][GETPOS(64 - 1, j)];\n#else\n\t\t\ttail = (uint32_t*)&RawPsw[0][GETPOS(64 - 1 - 3, j)];\n\n\n\t\t\t*tail = cur_len*8;\n\t\t}\n\t\tSIMDSHA1body(RawPsw[0], digest, digest, SSEi_MIXED_IN | SSEi_RELOAD);\n\n\t\tfor (j = 0; j < NBKEYS; ++j) {\n\t\t\tfor (i = 0; i < 4; ++i) {\n\t\t\t\tint idx = indices[index + j];\n\t\t\t\tuint32_t *dst = (uint32_t*)&aes_key[idx*16];\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\tdst[i] = digest[HASH_IDX(j) + i*SIMD_COEF_32];\n#else\n\t\t\t\tdst[i] = JOHNSWAP(digest[HASH_IDX(j) + i*SIMD_COEF_32]);\n\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rar_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(digest[HASH_IDX(j) + i*SIMD_COEF_32]);\n#endif\n\t\t\t}\n\t\t}\n\t}\n\tMEM_FREE(indices);\n#else\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tint i16 = index*16;\n\t\tunsigned int i;\n\t\tunsigned char RawPsw[UNICODE_LENGTH + 8 + 3];\n\t\tint RawLength;\n\t\tSHA_CTX ctx, tempctx;\n\t\tunsigned int digest[5];\n\t\tunsigned char *PswNum, tempout[20];\n\n\t\tRawLength = saved_len[index] + 8 + 3;\n\t\tPswNum = (unsigned char*) &RawPsw[saved_len[index] + 8];\n\t\tPswNum[1] = PswNum[2] = 0;\n\t\t/* derive IV and key for AES from saved_key and\n\t\t saved_salt, this code block is based on unrarhp's\n\t\t and unrar's sources */\n\t\tmemcpy(RawPsw, &saved_key[UNICODE_LENGTH * index], saved_len[index]);\n\t\tmemcpy(RawPsw + saved_len[index], saved_salt, 8);\n\t\tSHA1_Init(&ctx);\n\t\tfor (i = 0; i < ROUNDS; i++) {\n\t\t\tPswNum[0] = (unsigned char) i;\n\t\t\tif ( ((unsigned char) i) == 0) {\n\t\t\t\tPswNum[1] = (unsigned char) (i >> 8);\n\t\t\t\tPswNum[2] = (unsigned char) (i >> 16);\n\t\t\t}\n\t\t\tSHA1_Update(&ctx, RawPsw, RawLength);\n\t\t\tif (i % (ROUNDS / 16) == 0) {\n\t\t\t\ttempctx = ctx;\n\t\t\t\tSHA1_Final(tempout, &tempctx);\n\t\t\t\taes_iv[i16 + i / (ROUNDS / 16)] = tempout[19];\n\t\t\t}\n\t\t}\n\t\tSHA1_Final((unsigned char*)digest, &ctx);\n\t\tfor (i = 0; i < 4; i++)\t/* reverse byte order */\n\t\t\tdigest[i] = JOHNSWAP(digest[i]);\n\t\tmemcpy(&aes_key[i16], (unsigned char*)digest, 16);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rar_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ine static void check_all_rar(rar_file *cur_file, int count)\n{\n\tunsigned int index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t\tcheck_rar(cur_file, index, &aes_key[index * 16], &aes_iv[index * 16]);\n}\n\nstatic int cmp_all(void *binary, int count)\n{\n\tfmt_data *blob = binary;\n\trar_file *cur_file = blob->blob;\n\tint index;\n\n\tcheck_all_rar(cur_file, count);\n\n\tfor (index = 0; index < count; index++)\n\t\tif (cracked[index])\n\t\t\treturn 1;\n\treturn 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/NETLMv2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i = 0;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tunsigned char ntlm_v2_hash[16];\n\t\tHMACMD5Context ctx; // can't be moved above the OMP pragma\n\n\t\tif 
(!keys_prepared) {\n\t\t\tint len;\n\t\t\tunsigned char ntlm[16];\n\t\t\t/* Generate 16-byte NTLM hash */\n\t\t\tlen = E_md4hash(saved_plain[i], saved_len[i], ntlm);\n\n\t\t\t// We do key setup of the next HMAC_MD5 here (once per salt)\n\t\t\thmac_md5_init_K16(ntlm, &saved_ctx[i]);\n\n\t\t\tif (len <= 0)\n\t\t\t\tsaved_plain[i][-len] = 0; // match truncation\n\t\t}\n\n\t\t/* HMAC-MD5(Username + Domain, NTLM Hash) */\n\t\tmemcpy(&ctx, &saved_ctx[i], sizeof(ctx));\n\t\thmac_md5_update(&challenge[17], (int)challenge[16], &ctx);\n\t\thmac_md5_final(ntlm_v2_hash, &ctx);\n\n\t\t/* Generate 16-byte non-client nonce portion of LMv2 Response */\n\t\t/* HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) + Nonce */\n\t\thmac_md5(ntlm_v2_hash, challenge, 16, (unsigned char*)output[i]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/cardano_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_len = sizeof(kdf_salt);\n\n\t// ChaCha20 params\n\tstatic const int chacha_rounds = 20;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tint i;\n\n\t\tunsigned char kdf_out[MIN_KEYS_PER_CRYPT][BUF_SIZE];\n\t\tunsigned char blake_out[MIN_KEYS_PER_CRYPT][PWD_HASH_LEN];\n\n#if SIMD_COEF_64\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT]; // blake2b_256 hashed password\n\t\tunsigned char *pout[MIN_KEYS_PER_CRYPT]; // 40-byte length KDF\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t// blake2b_256 hash of the password.\n\t\t\tblake2b(blake_out[i], (unsigned char *)saved_key[index + i], NULL, PWD_HASH_LEN,\n\t\t\t strlen(saved_key[index + i]), 0);\n#if SIMD_COEF_64\n\t\t\tlens[i] = PWD_HASH_LEN;\n\t\t\tpin[i] = (unsigned char *)blake_out[i];\n\t\t\tpout[i] = (unsigned char *)kdf_out[i];\n\t\t}\n\n\t\tpbkdf2_sha512_sse((const unsigned char **)pin, lens, (unsigned char *)kdf_salt, kdf_salt_len, kdf_rounds, pout, BUF_SIZE, 0);\n#else\n\t\t\tpbkdf2_sha512(blake_out[i], PWD_HASH_LEN,\n\t\t\t (unsigned char *)kdf_salt, kdf_salt_len, kdf_rounds, kdf_out[i], BUF_SIZE, 0);\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/NETLM_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_key)", "context_chars": 100, "text": "pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tDES_key_schedule ks;\n\tint i;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t/* Just do a partial binary, the first DES operation */\n\t\tsetup_des_key(saved_key[i], &ks);\n\t\tDES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i],\n\t\t &ks, DES_ENCRYPT);\n\t} #pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/crc32_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i;\n\n\tswitch (crctype) {\n\n\tcase 0:\n#ifdef _OPENMP\nfor (i = 0; i < count; ++i) {\n\t\t\tCRC32_t crc = crcsalt;\n\t\t\tunsigned char *p = (unsigned char*)saved_key[i];\n\t\t\twhile (*p)\n\t\t\t\tcrc = jtr_crc32(crc, *p++);\n\t\t\tcrcs[i] = crc;\n\t\t\t//printf(\"%s() In: '%s' Out: %08x\\n\", __FUNCTION__, saved_key[i], ~crc);\n\t\t} #pragma omp parallel for private(i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/crc32_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "s() In: '%s' Out: %08x\\n\", __FUNCTION__, saved_key[i], ~crc);\n\t\t}\n\t\tbreak;\n\n\tcase 1:\n#ifdef _OPENMP\nfor (i = 0; i < count; ++i) {\n\t\t\tCRC32_t crc = crcsalt;\n\t\t\tunsigned char *p = (unsigned char*)saved_key[i];\n\t\t\twhile (*p)\n\t\t\t\tcrc = jtr_crc32c(crc, *p++);\n\t\t\tcrcs[i] = crc;\n\t\t\t//printf(\"%s() In: '%s' Out: %08x\\n\", __FUNCTION__, saved_key[i], ~crc);\n\t\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/aix_ssha_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "default:\n#ifdef SSE_GROUP_SZ_SHA512\n\t\tinc = SSE_GROUP_SZ_SHA512;\n#endif\n\t\tbreak;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc)\n\t{\n\t\tint j = index;\n\t\twhile (j < index + inc) {\n\t\t\tif (cur_salt->type == 1) {\n#ifdef SSE_GROUP_SZ_SHA1\n\t\t\t\tint lens[SSE_GROUP_SZ_SHA1], i;\n\t\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA1];\n\t\t\t\tunion {\n\t\t\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA1];\n\t\t\t\t\tunsigned char *poutc;\n\t\t\t\t} x;\n\t\t\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\t\t\tlens[i] = strlen(saved_key[j]);\n\t\t\t\t\tpin[i] = (unsigned char*)(saved_key[j]);\n\t\t\t\t\tx.pout[i] = crypt_out[j];\n\t\t\t\t\t++j;\n\t\t\t\t}\n\t\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0);\n#else\n\t\t\t\tpbkdf2_sha1((const unsigned char*)(saved_key[j]), strlen(saved_key[j]),\n\t\t\t\t\tcur_salt->salt, strlen((char*)cur_salt->salt),\n\t\t\t\t\tcur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0);\n\t\t\t\t++j;\n\n\t\t\t}\n\t\t\telse if (cur_salt->type == 256) {\n#ifdef SSE_GROUP_SZ_SHA256\n\t\t\t\tint lens[SSE_GROUP_SZ_SHA256], i;\n\t\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA256];\n\t\t\t\tunion {\n\t\t\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA256];\n\t\t\t\t\tunsigned char *poutc;\n\t\t\t\t} x;\n\t\t\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {\n\t\t\t\t\tlens[i] = strlen(saved_key[j]);\n\t\t\t\t\tpin[i] = (unsigned char*)saved_key[j];\n\t\t\t\t\tx.pout[i] = crypt_out[j];\n\t\t\t\t\t++j;\n\t\t\t\t}\n\t\t\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0);\n#else\n\t\t\t\tpbkdf2_sha256((const unsigned char*)(saved_key[j]), strlen(saved_key[j]),\n\t\t\t\t\tcur_salt->salt, strlen((char*)cur_salt->salt),\n\t\t\t\t\tcur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0);\n\t\t\t\t++j;\n\n\t\t\t}\n\t\t\telse {\n#ifdef SSE_GROUP_SZ_SHA512\n\t\t\t\tint lens[SSE_GROUP_SZ_SHA512], i;\n\t\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA512];\n\t\t\t\tunion {\n\t\t\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA512];\n\t\t\t\t\tunsigned char *poutc;\n\t\t\t\t} x;\n\t\t\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {\n\t\t\t\t\tlens[i] = strlen(saved_key[j]);\n\t\t\t\t\tpin[i] = (unsigned char*)saved_key[j];\n\t\t\t\t\tx.pout[i] = crypt_out[j];\n\t\t\t\t\t++j;\n\t\t\t\t}\n\t\t\t\tpbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0);\n#else\n\t\t\t\tpbkdf2_sha512((const unsigned char*)(saved_key[j]), strlen(saved_key[j]),\n\t\t\t\t\tcur_salt->salt, 
strlen((char*)cur_salt->salt),\n\t\t\t\t\tcur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0);\n\t\t\t\t++j;\n\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DMD5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char hash[16];\n\t\tunsigned char hex_hash[MD5_HEX_SIZE];\n\t\tunsigned char *ptr_src, *ptr_dst;\n\t\tMD5_CTX ctx;\n\t\tint i;\n\n\t\tMD5_Init(&ctx);\n\t\t// \"username:realm\"\n\t\tMD5_Update(&ctx, cur_salt->login_id, cur_salt->login_id_len);\n\t\t// \"password\"\n\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Final(hash, &ctx);\n\n\t\tMD5_Init(&ctx);\n\t\t// previous result\n\t\tMD5_Update(&ctx, hash, BINARY_SIZE);\n\t\t// \":nonce:cnonce[:authzid]\"\n\t\tMD5_Update(&ctx, cur_salt->nonces, cur_salt->nonces_len);\n\t\tMD5_Final(hash, &ctx);\n\n\t\t// hexify\n\t\tptr_src = hash;\n\t\tptr_dst = hex_hash;\n\t\tfor (i = 0; i < BINARY_SIZE; ++i) {\n\t\t\tunsigned char v = *ptr_src++;\n\n\t\t\t*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];\n\t\t\t*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];\n\t\t}\n\n\t\tMD5_Init(&ctx);\n\t\t// previous result, in hex\n\t\tMD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);\n\t\t// \":nonce:nc:cnonce:qop:hex_A2_hash\n\t\tMD5_Update(&ctx, cur_salt->prehash_KD, cur_salt->prehash_KD_len);\n\t\tMD5_Final((unsigned char*)crypt_key[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/eigrp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\n\t\tif (cur_salt->algo_type == 2) {\n\t\t\tmemcpy(&ctx, &cur_salt->prep_salt, sizeof(MD5_CTX));\n\t\t\tMD5_Update(&ctx, saved_key[index], saved_len[index]);\n\t\t\tif (saved_len[index] < 16) {\n\t\t\t\tMD5_Update(&ctx, zeropad, 16 - saved_len[index]);\n\t\t\t}\n\t\t\t// do we have extra_salt?\n\t\t\tif (cur_salt->have_extra_salt) {\n\t\t\t\tMD5_Update(&ctx, cur_salt->extra_salt, cur_salt->extra_salt_length);\n\t\t\t}\n\t\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t\t} else {\n\t\t\tHMAC_SHA256_CTX hctx[1];\n\t\t\tunsigned char output[32];\n\t\t\tunsigned char buffer[1 + PLAINTEXT_LENGTH + 45 + 1] = { 0 }; // HMAC key ==> '\\n' + password + IP address\n\t\t\tbuffer[0] = '\\n'; // WTF?\n\t\t\tmemcpy(buffer + 1, saved_key[index], saved_len[index]);\n\t\t\tmemcpy(buffer + 1 + saved_len[index], cur_salt->ip, cur_salt->ip_length);\n\t\t\tHMAC_SHA256_Init(hctx, buffer, 1 + saved_len[index] + cur_salt->ip_length);\n\t\t\tHMAC_SHA256_Update(hctx, cur_salt->salt, cur_salt->length);\n\t\t\tHMAC_SHA256_Final(output, hctx);\n\t\t\tmemcpy((unsigned char*)crypt_out[index], output, BINARY_SIZE);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/hsrp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\t\tint len = 
saved_len[index];\n\t\tif (dirty) {\n\t\t\t// we use the saved_key buffer in-line.\n\t\t\tunsigned int *block = (unsigned int*)saved_key[index];\n\t\t\tMD5_Init(&saved_ctx[index]);\n\t\t\t// set bit\n\t\t\tsaved_key[index][len] = 0x80;\n\t\t\tblock[14] = len << 3;\n#if !ARCH_LITTLE_ENDIAN\n\t\t\tblock[14] = JOHNSWAP(block[14]);\n\n\t\t\tMD5_Update(&saved_ctx[index], (unsigned char*)block, 64);\n\t\t\t// clear the bit, so that get_key returns proper key.\n\t\t\tsaved_key[index][len] = 0;\n\t\t}\n\t\tmemcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));\n\t\t// data\n\t\tMD5_Update(&ctx, cur_salt->salt, cur_salt->length);\n\t\t// key (again)\n\t\tMD5_Update(&ctx, saved_key[index], len);\n\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/gpg_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "hm);\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tint res;\n\t\tunsigned char keydata[64];\n\n\t\tgpg_common_cur_salt->s2kfun(saved_key[index], keydata, ks);\n\t\tres = gpg_common_check(keydata, ks);\n\t\tif (res) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/lastpass_sniffed_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tuint32_t key[MIN_KEYS_PER_CRYPT][8];\n\t\tunsigned i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\tunion {\n\t\t\tuint32_t *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tx.pout[i] = key[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, (unsigned char*)cur_salt->username, strlen(cur_salt->username), cur_salt->iterations, &(x.poutc), 32, 0);\n#else\n\t\tpbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), (unsigned char*)cur_salt->username, strlen(cur_salt->username), cur_salt->iterations, (unsigned char*)(&key[0]),32,0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tunsigned char *Key = (unsigned char*)key[i];\n\t\t\tAES_KEY akey;\n\t\t\tunsigned char iv[16];\n\t\t\tunsigned char out[32];\n\t\t\tAES_set_encrypt_key(Key, 256, &akey);\n\t\t\tmemset(iv, 0, sizeof(iv));\n\t\t\tAES_cbc_encrypt((const unsigned char*)cur_salt->username, out, 32, &akey, iv, AES_ENCRYPT);\n\t\t\tmemcpy(crypt_key[index+i], out, 16);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_DES_bs_f_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ng %d per-salt kernels, one dot per three salts done: \", num_salts);\n\n#if _OPENMP && PARALLEL_BUILD\nfor (i = 0; i < num_salts; i++) {\n\t\t\tinit_kernel(salt_list[i], gpu_id, 1, 0, forced_global_keys ? 
0 :local_work_size);\n\n#if _OPENMP && PARALLEL_BUILD\n\t\t\tif (omp_get_thread_num() == 0)\n\n\t\t\t{\n\t\t\t\topencl_process_event();\n\t\t\t}\n\t\t\tif (num_salts > 10 && (i % 3) == 2 && !ocl_any_test_running && john_main_process)\n\t\t\t\tfprintf(stderr, \".\");\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/whirlpool_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int crypt_0(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_whirlpool0_context ctx;\n\n\t\tsph_whirlpool0_init(&ctx);\n\t\tsph_whirlpool0(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_whirlpool0_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/whirlpool_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int crypt_1(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_whirlpool1_context ctx;\n\n\t\tsph_whirlpool1_init(&ctx);\n\t\tsph_whirlpool1(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_whirlpool1_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/whirlpool_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int crypt_2(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n#if (AC_BUILT && HAVE_WHIRLPOOL) ||\t\\\n (!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)\n\t\tWHIRLPOOL_CTX ctx;\n\n\t\tWHIRLPOOL_Init(&ctx);\n\t\tWHIRLPOOL_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tWHIRLPOOL_Final((unsigned char*)crypt_out[index], &ctx);\n#else\n\t\tsph_whirlpool_context ctx;\n\n\t\tsph_whirlpool_init(&ctx);\n\t\tsph_whirlpool(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_whirlpool_close(&ctx, (unsigned char*)crypt_out[index]);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/vmx_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MAX_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MAX_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 32, 0);\n#else\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 32, 0);\n\t\t}\n\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tcracked[index+i] = vmx_decrypt(cur_salt, master[i], 
cur_salt->blob);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawMD4_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt;\n\tint index;\n\tint loops = (count + MIN_KEYS_PER_CRYPT - 1) / MIN_KEYS_PER_CRYPT;\n\n#ifdef _OPENMP\nfor (index = 0; index < loops; index++) {\n#if SIMD_COEF_32\n\t\tSIMDmd4body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);\n#else\n\t\tMD4_CTX ctx;\n\t\tMD4_Init(&ctx);\n\t\tMD4_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tMD4_Final((unsigned char *)crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ecryptfs_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tint j;\n\t\tSHA512_CTX ctx;\n#ifdef SIMD_COEF_64\n\t\tunsigned char tmpBuf[64];\n\t\tunsigned int i;\n\t\tunsigned char _IBuf[8*SHA_BUF_SIZ*MIN_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;\n\t\tuint64_t *keys64;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);\n\t\tkeys64 = (uint64_t*)keys;\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);\n\t\t\tSHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\tSHA512_Final((unsigned char *)tmpBuf, &ctx);\n\t\t\tfor (j = 0; j < 64; ++j)\n\t\t\t\tkeys[GETPOS_512(j, i)] = tmpBuf[j];\n\t\t}\n\t\tuint64_t rounds = ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS - 1;\n\t\tSIMDSHA512body(keys, keys64, &rounds, SSEi_HALF_IN|SSEi_LOOP);\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA512body(keys, (uint64_t*)crypt_out[index], NULL, SSEi_HALF_IN|SSEi_FLAT_OUT);\n#else\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);\n\t\tSHA512_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\t\t/* now \"h\" (crypt_out[index] becomes our input, total SHA-512 calls => 65536 */\n\t\tfor (j = 1; j <= ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) {\n\t\t\tSHA512_CTX ctx;\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);\n\t\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/openssl_enc_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t{\n\t\tif (decrypt(saved_key[index]) == 0)\n\t\t\tcracked[index] = 1;\n\t\telse\n\t\t\tcracked[index] = 0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5_asrep_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT)\n\t{\n\t\tunsigned char tkey[MIN_KEYS_PER_CRYPT][32];\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tint i;\n\t\tunsigned char 
K3[16];\n#ifdef _MSC_VER\n\t\tunsigned char ddata[65536];\n#else\n\t\tunsigned char ddata[cur_salt->edata2len];\n\n\t\tunsigned char checksum[16];\n\t\tRC4_KEY rckey;\n\n\t\tif (cur_salt->etype == 23) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tif (new_keys) {\n\t\t\t\t\tMD4_CTX ctx;\n\t\t\t\t\tunsigned char key[16];\n\t\t\t\t\tUTF16 wkey[PLAINTEXT_LENGTH + 1];\n\t\t\t\t\tint len;\n\n\t\t\t\t\tlen = enc_to_utf16(wkey, PLAINTEXT_LENGTH,\n\t\t\t\t\t\t\t(UTF8*)saved_key[index+i],\n\t\t\t\t\t\t\tstrlen(saved_key[index+i]));\n\t\t\t\t\tif (len <= 0) {\n\t\t\t\t\t\tsaved_key[index+i][-len] = 0;\n\t\t\t\t\t\tlen = strlen16(wkey);\n\t\t\t\t\t}\n\n\t\t\t\t\tMD4_Init(&ctx);\n\t\t\t\t\tMD4_Update(&ctx, (char*)wkey, 2 * len);\n\t\t\t\t\tMD4_Final(key, &ctx);\n\n\t\t\t\t\thmac_md5(key, data, 4, saved_K1[index+i]);\n\t\t\t\t}\n\n\t\t\t\thmac_md5(saved_K1[index+i], cur_salt->edata1, 16, K3);\n\n\t\t\t\tRC4_set_key(&rckey, 16, K3);\n\t\t\t\tRC4(&rckey, 32, cur_salt->edata2, ddata);\n\n\t\t\t\t/* check the checksum */\n\t\t\t\tRC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);\n\t\t\t\thmac_md5(saved_K1[index+i], ddata, cur_salt->edata2len, checksum);\n\n\t\t\t\tif (!memcmp(checksum, cur_salt->edata1, 16)) {\n\t\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (cur_salt->etype == 17 || cur_salt->etype == 18) {\n\t\t\t// See \"krb5int_decode_tgs_rep\", \"krb5int_enctypes_list\", \"krb5int_dk_decrypt\" (key function),\n\t\t\t// \"krb5_k_decrypt\", and \"krb5_kdc_rep_decrypt_proc\"\n\t\t\t// from krb5 software package.\n\t\t\t// https://www.ietf.org/rfc/rfc3962.txt document, https://www.ietf.org/rfc/rfc3961.txt, and\n\t\t\t// http://www.zeroshell.org/kerberos/Kerberos-operation/\n\t\t\tconst int key_size = (cur_salt->etype == 17) ? 
16 : 32;\n\n#ifdef SIMD_COEF_32\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tpout[i] = tkey[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, (unsigned char*)cur_salt->salt, strlen(cur_salt->salt), 4096, pout, key_size, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[index+i]);\n\t\t\t\tpbkdf2_sha1((const unsigned char*)saved_key[index], len[i],\n\t\t\t\t\t\t(unsigned char*)cur_salt->salt, strlen(cur_salt->salt),\n\t\t\t\t\t\t4096, tkey[i], key_size, 0);\n\t\t\t}\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tunsigned char Ki[32];\n#ifdef _MSC_VER\n\t\t\t\tunsigned char plaintext[65536];\n#else\n\t\t\t\tunsigned char plaintext[cur_salt->edata2len];\n\n\t\t\t\tunsigned char checksum[20];\n\t\t\t\tunsigned char base_key[32];\n\t\t\t\tunsigned char Ke[32];\n\n\t\t\t\tdk(base_key, tkey[i], key_size, constant, 16);\n\t\t\t\tdk(Ke, base_key, key_size, ke_input, 16);\n\t\t\t\tkrb_decrypt(cur_salt->edata2, cur_salt->edata2len, plaintext, Ke, key_size);\n\t\t\t\t// derive checksum of plaintext\n\t\t\t\tdk(Ki, base_key, key_size, ki_input, 16);\n\t\t\t\thmac_sha1(Ki, key_size, plaintext, cur_salt->edata2len, checksum, 20);\n\t\t\t\tif (!memcmp(checksum, cur_salt->edata1, 12)) {\n\t\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/androidfde_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "unt = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*max_cracked);\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\thash_plugin_check_hash(index);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/md2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_md2_context ctx;\n\n\t\tsph_md2_init(&ctx);\n\t\tsph_md2(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_md2_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/NETSPLITLM_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_pre)", "context_chars": 100, "text": "pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tDES_key_schedule ks;\n\tint i;\n\n#ifdef _OPENMP\nfor (i=0; i #pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_pre)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/lotus5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += LOTUS_N) {\n\t\tstruct {\n\t\t\tunion {\n\t\t\t\tunsigned char m[64];\n\t\t\t\tunsigned char m4[4][16];\n\t\t\t\tARCH_WORD m4w[4][16 / 
ARCH_SIZE];\n\t\t\t} u;\n\t\t} ctx[LOTUS_N];\n\t\tint password_length;\n\n\t\tmemset(ctx[0].u.m4[0], 0, 16);\n\t\tpassword_length = strlen(saved_key[index]);\n\t\tmemset(ctx[0].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);\n\t\tmemcpy(ctx[0].u.m4[1], saved_key[index], password_length);\n\t\tmemcpy(ctx[0].u.m4[2], ctx[0].u.m4[1], 16);\n\n\t\tmemset(ctx[1].u.m4[0], 0, 16);\n\t\tpassword_length = strlen(saved_key[index + 1]);\n\t\tmemset(ctx[1].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);\n\t\tmemcpy(ctx[1].u.m4[1], saved_key[index + 1], password_length);\n\t\tmemcpy(ctx[1].u.m4[2], ctx[1].u.m4[1], 16);\n\n#if LOTUS_N == 3\n\t\tmemset(ctx[2].u.m4[0], 0, 16);\n\t\tpassword_length = strlen(saved_key[index + 2]);\n\t\tmemset(ctx[2].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);\n\t\tmemcpy(ctx[2].u.m4[1], saved_key[index + 2], password_length);\n\t\tmemcpy(ctx[2].u.m4[2], ctx[2].u.m4[1], 16);\n\n\t\tlotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],\n\t\t ctx[1].u.m4[1], ctx[1].u.m4[3],\n\t\t ctx[2].u.m4[1], ctx[2].u.m4[3]);\n\t\tlotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);\n#else\n\t\tlotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],\n\t\t ctx[1].u.m4[1], ctx[1].u.m4[3]);\n\t\tlotus_mix(ctx[0].u.m, ctx[1].u.m);\n\n\n\t\tmemcpy(ctx[0].u.m4[1], ctx[0].u.m4[3], 16);\n\t\tmemcpy(ctx[1].u.m4[1], ctx[1].u.m4[3], 16);\n#if LOTUS_N == 3\n\t\tmemcpy(ctx[2].u.m4[1], ctx[2].u.m4[3], 16);\n\n\t\t{\n\t\t\tint i;\n\t\t\tfor (i = 0; i < 16 / ARCH_SIZE; i++) {\n\t\t\t\tctx[0].u.m4w[2][i] = ctx[0].u.m4w[0][i] ^ ctx[0].u.m4w[1][i];\n\t\t\t\tctx[1].u.m4w[2][i] = ctx[1].u.m4w[0][i] ^ ctx[1].u.m4w[1][i];\n#if LOTUS_N == 3\n\t\t\t\tctx[2].u.m4w[2][i] = ctx[2].u.m4w[0][i] ^ ctx[2].u.m4w[1][i];\n\n\t\t\t}\n\t\t}\n#if LOTUS_N == 3\n\t\tlotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);\n#else\n\t\tlotus_mix(ctx[0].u.m, ctx[1].u.m);\n\n\t\tmemcpy(crypt_key[index], ctx[0].u.m4[0], BINARY_SIZE);\n\t\tmemcpy(crypt_key[index + 1], ctx[1].u.m4[0], BINARY_SIZE);\n#if LOTUS_N == 3\n\t\tmemcpy(crypt_key[index + 2], ctx[2].u.m4[0], BINARY_SIZE);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/itunes_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n\n\t\tif (cur_salt->version == 9) { // iTunes Backup < 10\n#ifdef SIMD_COEF_32\n\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tint loops = MIN_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tfor (i = 0; i < loops; i++)\n\t\t\t\tpbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tcracked[index+i] = 
itunes_common_decrypt(cur_salt, master[i]);\n\t\t\t}\n\t\t} else { // iTunes Backup 10.x\n#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)\n\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tint loops = MIN_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA256;\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tfor (i = 0; i < loops; i++)\n\t\t\t\tpbkdf2_sha256_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA256), &lens[i * SSE_GROUP_SZ_SHA256], cur_salt->dpsl, SALTLEN, cur_salt->dpic, pout + (i * SSE_GROUP_SZ_SHA256), 32, 0);\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = 32;\n\t\t\t\tpin[i] = (unsigned char*)master[i];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tloops = MIN_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;\n\t\t\tfor (i = 0; i < loops; i++)\n\t\t\t\tpbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->dpsl, SALTLEN, cur_salt->dpic, master[i], 32, 0);\n\t\t\t\tpbkdf2_sha1(master[i], 32, cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);\n\t\t\t}\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tcracked[index+i] = itunes_common_decrypt(cur_salt, master[i]);\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/luks_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char *af_decrypted = (unsigned char *)mem_alloc(cur_salt->afsize + 20);\n\t\tint i, iterations = cur_salt->bestiter;\n\t\tint dklen = john_ntohl(cur_salt->myphdr.keyBytes);\n\t\tuint32_t keycandidate[MIN_KEYS_PER_CRYPT][256/4];\n\t\tuint32_t masterkeycandidate[MIN_KEYS_PER_CRYPT][256/4];\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\tunion {\n\t\t\tuint32_t *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = keycandidate[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t (const unsigned char*)(cur_salt->myphdr.keyblock[cur_salt->bestslot].passwordSalt), LUKS_SALTSIZE,\n\t\t iterations, &(x.poutc),\n\t\t dklen, 0);\n#else\n\t\tpbkdf2_sha1((const unsigned char *)saved_key[index], strlen(saved_key[index]),\n\t\t (const unsigned char*)(cur_salt->myphdr.keyblock[cur_salt->bestslot].passwordSalt), LUKS_SALTSIZE,\n\t\t iterations, (unsigned char*)keycandidate[0], dklen, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t// Decrypt the blocksi\n\t\t\tdecrypt_aes_cbc_essiv(cur_salt->cipherbuf, af_decrypted, (unsigned char*)keycandidate[i], cur_salt->afsize, cur_salt);\n\t\t\t// AFMerge the blocks\n\t\t\tAF_merge(af_decrypted, (unsigned char*)masterkeycandidate[i], cur_salt->afsize,\n\t\t\t 
john_ntohl(cur_salt->myphdr.keyblock[cur_salt->bestslot].stripes));\n\t\t}\n\t\t// pbkdf2 again\n#ifdef SIMD_COEF_32\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = john_ntohl(cur_salt->myphdr.keyBytes);\n\t\t\tpin[i] = (unsigned char*)masterkeycandidate[i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t (const unsigned char*)cur_salt->myphdr.mkDigestSalt, LUKS_SALTSIZE,\n\t\t john_ntohl(cur_salt->myphdr.mkDigestIterations), &(x.poutc),\n\t\t LUKS_DIGESTSIZE, 0);\n#else\n\t\tpbkdf2_sha1((unsigned char*)masterkeycandidate[0], john_ntohl(cur_salt->myphdr.keyBytes),\n\t\t (const unsigned char*)cur_salt->myphdr.mkDigestSalt, LUKS_SALTSIZE,\n\t\t john_ntohl(cur_salt->myphdr.mkDigestIterations),\n\t\t (unsigned char*)crypt_out[index], LUKS_DIGESTSIZE, 0);\n\n\n\t\tMEM_FREE(af_decrypted);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bitcoin_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tSHA512_CTX sha_ctx;\n\t\tint i;\n\n#ifdef SIMD_COEF_64\n/* We use SSEi_HALF_IN, so can halve SHA_BUF_SIZ */\n#undef SHA_BUF_SIZ\n#define SHA_BUF_SIZ 8\n\t\tchar unaligned_buf[MIN_KEYS_PER_CRYPT*SHA_BUF_SIZ*sizeof(uint64_t)+MEM_ALIGN_SIMD];\n\t\tuint64_t *key_iv = (uint64_t*)mem_align(unaligned_buf, MEM_ALIGN_SIMD);\n\t\tJTR_ALIGN(8) unsigned char hash1[SHA512_DIGEST_LENGTH]; // 512 bits\n\t\tint index2;\n\n\t\tfor (index2 = 0; index2 < MIN_KEYS_PER_CRYPT; index2++) {\n\t\t\t// The first hash for this password\n\t\t\tSHA512_Init(&sha_ctx);\n\t\t\tSHA512_Update(&sha_ctx, saved_key[index+index2], strlen(saved_key[index+index2]));\n\t\t\tSHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);\n\t\t\tSHA512_Final(hash1, &sha_ctx);\n\n\t\t\t// Now copy and convert hash1 from flat into SIMD_COEF_64 buffers.\n\t\t\tfor (i = 0; i < SHA512_DIGEST_LENGTH/sizeof(uint64_t); ++i) {\n\t\t\t\tkey_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.h[i];\n\t\t\t}\n\t\t}\n\n\t\t// the first iteration is already done above\n\t\tuint64_t rounds = cur_salt->cry_rounds - 1;\n\t\tSIMDSHA512body(key_iv, key_iv, &rounds, SSEi_HALF_IN|SSEi_LOOP);\n\n\t\tfor (index2 = 0; index2 < MIN_KEYS_PER_CRYPT; index2++) {\n\t\t\tAES_KEY aes_key;\n\t\t\tunion {\n\t\t\t\tunsigned char uc[32];\n\t\t\t\tuint64_t u64[4];\n\t\t\t} key;\n\t\t\tunsigned char iv[16];\n\t\t\tunsigned char output[16];\n\n\t\t\tmemcpy(iv, cur_salt->cry_master + cur_salt->cry_master_length - 32, 16);\n\n\t\t\t// Copy and convert from SIMD_COEF_64 buffers back into flat buffers, in little-endian\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\tfor (i = 0; i < 4; i++) // the derived key\n\t\t\t\tkey.u64[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);\n#else\n\t\t\tfor (i = 0; i < 4; i++) // the derived key\n\t\t\t\tkey.u64[i] = key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];\n\n\n\t\t\tAES_set_decrypt_key(key.uc, 256, &aes_key);\n\t\t\tAES_cbc_encrypt(cur_salt->cry_master + cur_salt->cry_master_length - 16, output, 16, &aes_key, iv, AES_DECRYPT);\n\n\t\t\tif (check_pkcs_pad(output, 16, 16) == cur_salt->final_block_fill) 
{\n\t\t\t\tcracked[index + index2] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n#else\n\t\tAES_KEY aes_key;\n\t\tunsigned char key_iv[SHA512_DIGEST_LENGTH]; // buffer for both the derived key and initial IV\n\t\tunsigned char iv[16]; // updated IV for the final block\n\t\tunsigned char output[16];\n\n\t\tmemcpy(iv, cur_salt->cry_master + cur_salt->cry_master_length - 32, 16);\n\n\t\tSHA512_Init(&sha_ctx);\n\t\tSHA512_Update(&sha_ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);\n\t\tSHA512_Final(key_iv, &sha_ctx);\n\t\tfor (i = 1; i < cur_salt->cry_rounds; i++) { // start at 1; the first iteration is already done\n\t\t\tSHA512_Init(&sha_ctx);\n\t\t\tSHA512_Update(&sha_ctx, key_iv, SHA512_DIGEST_LENGTH);\n\t\t\tSHA512_Final(key_iv, &sha_ctx);\n\t\t}\n\n\t\tAES_set_decrypt_key(key_iv, 256, &aes_key);\n\t\tAES_cbc_encrypt(cur_salt->cry_master + cur_salt->cry_master_length - 16, output, 16, &aes_key, iv, AES_DECRYPT);\n\n\t\tif (check_pkcs_pad(output, 16, 16) == cur_salt->final_block_fill) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/qnx_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ile (tot_todo % MIN_KEYS_PER_CRYPT)\n\t\t\t\tMixOrder[tot_todo++] = count;\n\t\t}\n\t}\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += inc) {\n#ifdef SIMD_COEF_32\n\t\tif (!MixOrder)\n\n\t\t{\n\t\t\tint i, len = saved_len[index];\n\t\t\tchar *pass = saved_key[index];\n\n\t\t\tswitch (cur_salt->type) {\n\t\t\tcase 5:\n\t\t\t{\n\t\t\t\tMD5_CTX ctx;\n\n\t\t\t\tMD5_Init(&ctx);\n\t\t\t\tMD5_Update(&ctx, cur_salt->salt, cur_salt->len);\n\t\t\t\tfor (i = 0; i <= cur_salt->rounds; ++i)\n\t\t\t\t\tMD5_Update(&ctx, pass, len);\n\t\t\t\tMD5_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase 256:\n\t\t\t{\n\t\t\t\tSHA256_CTX ctx;\n\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, cur_salt->salt, cur_salt->len);\n\t\t\t\tfor (i = 0; i <= cur_salt->rounds; ++i)\n\t\t\t\t\tSHA256_Update(&ctx, pass, len);\n\t\t\t\tSHA256_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase 512:\n\t\t\t{\n\t\t\t\tSHA512_CTX ctx;\n\n\t\t\t\tSHA512_Init(&ctx);\n\t\t\t\tSHA512_Update(&ctx, cur_salt->salt, cur_salt->len);\n\t\t\t\tif (len && 128 % len == 0 && cur_salt->len+len*cur_salt->rounds > 256) {\n\t\t\t\t\t// we can optimize this, by filling buffer (after the\n\t\t\t\t\t// first salted buffer), and then simply calling\n\t\t\t\t\t// jtr_sha512_hash_block 'natively' never having to\n\t\t\t\t\t// refill the buffer again.\n\t\t\t\t\tint ex;\n\t\t\t\t\tfor (i = 0; i <= cur_salt->rounds; ++i) {\n\t\t\t\t\t\tSHA512_Update(&ctx, pass, len);\n\t\t\t\t\t\tif (ctx.total > 128+cur_salt->len)\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t++i;\n\t\t\t\t\tex = (256-ctx.total)/len;\n\t\t\t\t\ti += ex;\n\t\t\t\t\tctx.total += ex*len;\n\t\t\t\t\tjtr_sha512_hash_block(&ctx, ctx.buffer, 1);\n\t\t\t\t\twhile (i+128/len <= cur_salt->rounds) {\n\t\t\t\t\t\tctx.total += 128;\n\t\t\t\t\t\tjtr_sha512_hash_block(&ctx, ctx.buffer, 1);\n\t\t\t\t\t\ti += 128/len;\n\t\t\t\t\t}\n\t\t\t\t\tfor (;i <= cur_salt->rounds; ++i)\n\t\t\t\t\t\tctx.total += len;\n\t\t\t\t} else {\n\t\t\t\t\tfor (i = 0; i <= cur_salt->rounds; 
++i)\n\t\t\t\t\t\tSHA512_Update(&ctx, pass, len);\n\t\t\t\t}\n\t\t\t\tctx.bIsQnxBuggy = 1;\n\t\t\t\tSHA512_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/solarwinds_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MAX_KEYS_PER_CRYPT][1024];\n\t\tint i;\n\n#ifdef SIMD_COEF_32\n\t\tint len[MAX_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, (unsigned char *)cur_salt->salt, 8, 1000, pout, 1024, 0);\n#else\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]),\n\t\t\t\t(unsigned char *)cur_salt->salt, 8, 1000, master[i], 1024, 0);\n\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, master[i], 1024);\n\t\t\tSHA512_Final((unsigned char*)crypt_out[index+i], &ctx);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/enpass_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "OUP_SZ_SHA512;\n#else\n\tconst int batch_size = MAX_KEYS_PER_CRYPT;\n#endif\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += batch_size) {\n\t\tunsigned char master[MAX_KEYS_PER_CRYPT][32];\n\t\tunsigned char output[24];\n\t\tunsigned char *iv_in;\n\t\tunsigned char iv_out[16];\n\t\tint size, i;\n\t\tAES_KEY akey;\n\n\t\tif (cur_salt->version == 5) {\n#ifdef SIMD_COEF_32\n\t\t\tint len[MAX_BATCH_SIZE];\n\t\t\tunsigned char *pin[MAX_BATCH_SIZE], *pout[MAX_BATCH_SIZE];\n\n\t\t\tfor (i = 0; i < batch_size; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, cur_salt->iterations, pout, 32, 0);\n#else\n\t\t\tfor (i = 0; i < batch_size; ++i)\n\t\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]),\n\t\t\t\t cur_salt->salt, 16, cur_salt->iterations, master[i], 32, 0);\n\n\t\t} else {\n#ifdef SIMD_COEF_32\n\t\t\tint len[MAX_BATCH_SIZE];\n\t\t\tunsigned char *pin[MAX_BATCH_SIZE], *pout[MAX_BATCH_SIZE];\n\n\t\t\tfor (i = 0; i < batch_size; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned char **)pin, len, cur_salt->salt, 16, cur_salt->iterations, pout, 32, 0);\n#else\n\t\t\tfor (i = 0; i < batch_size; ++i)\n\t\t\t\tpbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]),\n\t\t\t\t cur_salt->salt, 16, cur_salt->iterations, master[i], 32, 0);\n\n\t\t}\n\n\t\tfor (i = 0; i < batch_size; ++i) {\n\t\t\t// memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ);\n\t\t\t// See \"sqlcipher_page_cipher\" and \"sqlite3Codec\" 
functions\n\t\t\tsize = page_sz - reserve_sz;\n\t\t\tiv_in = cur_salt->data + 16 + size; // initial 16 bytes are salt\n\n\t\t\tmemcpy(iv_out, iv_in, 16);\n\t\t\tAES_set_decrypt_key(master[i], 256, &akey);\n\t\t\t/*\n\t\t\t * decrypting 8 bytes from offset 16 is enough since the\n\t\t\t * verify_page function looks at output[16..23] only.\n\t\t\t */\n\t\t\tAES_cbc_encrypt(cur_salt->data + 16, output + 16, 8, &akey, iv_out, AES_DECRYPT);\n\n\t\t\tif (enpass_verify_page(output) == 0)\n\t\t\t\tcracked[index+i] = 1;\n\t\t\telse\n\t\t\t\tcracked[index+i] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/padlock_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tunsigned char output[4096] = {0};\n\t\tint i;\n\t\tunsigned char *tag = cur_salt->ct + cur_salt->ctlen - cur_salt->tag_len; // last \"tag_len\" bytes\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, master[i], 32, 0);\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tmemset(output, 0, 4096); // avoid possible false positives that can be caused by older \"valid\" decrypted data\n\t\t\taes_ccm_auth_decrypt(master[i], 256,\n\t\t\t\t\tcur_salt->ctlen - cur_salt->tag_len,\n\t\t\t\t\tcur_salt->iv, 13, cur_salt->add, // 13 is the correct iv size for padlock + sjcl combo\n\t\t\t\t\tcur_salt->addlen, cur_salt->ct, output,\n\t\t\t\t\ttag, cur_salt->tag_len);\n\t\t\t// CCM tag calculation is broken in Padlock + SJCL combination. Padlock sends \"add\" data to SJCL\n\t\t\t// without doing base64 decoding! 
As a result the JavaScript code in SJCL behaves very weirdly.\n\t\t\t// Instead of trying to emulate this broken behavior and struggling with JavaScript, we simply use\n\t\t\t// known plaintext attack here!\n\t\t\tif (cur_salt->ctlen - cur_salt->tag_len == 2) { // special case, empty database\n\t\t\t\tif (strncmp((const char*)output, \"[]\", 2) == 0)\n\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t} else { // general case\n\t\t\t\tif (output[0] != '[')\n\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\telse if (strstr((const char*)output, \"\\\"updated\\\"\"))\n\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/xmpp_scram_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tint index;\n\tconst int count = *pcount;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#if !defined (SIMD_COEF_32)\n\t\tunsigned char out[BINARY_SIZE];\n\t\tSHA_CTX ctx;\n\n\t\tpbkdf2_sha1((unsigned char*)saved_key[index],\n\t\t\t\tstrlen(saved_key[index]), cur_salt->salt,\n\t\t\t\tcur_salt->saltlen, cur_salt->iterations, out,\n\t\t\t\tBINARY_SIZE, 0);\n\n\t\thmac_sha1(out, BINARY_SIZE, (unsigned char*)\"Client Key\", 10, out, BINARY_SIZE);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, out, BINARY_SIZE);\n\t\tSHA1_Final((unsigned char*)crypt_out[index], &ctx);\n#else\n\t\tSHA_CTX ctx;\n\t\tint i;\n\t\tunsigned char *pin[SIMD_KEYS];\n\t\tint lens[SIMD_KEYS];\n\t\tunsigned char out_[SIMD_KEYS][BINARY_SIZE], *out[SIMD_KEYS];\n\n\t\tfor (i = 0; i < SIMD_KEYS; ++i) {\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tout[i] = out_[i];\n\t\t}\n\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt,\n\t\t\tcur_salt->saltlen, cur_salt->iterations, out,\n\t\t\tBINARY_SIZE, 0);\n\n\t\tfor (i = 0; i < SIMD_KEYS; ++i) {\n\t\t\thmac_sha1(out[i], BINARY_SIZE, (unsigned char*)\"Client Key\", 10, out[i], BINARY_SIZE);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, out[i], BINARY_SIZE);\n\t\t\tSHA1_Final((unsigned char*)crypt_out[index+i], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/telegram_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "CNT;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n\t\tunsigned char pkey[MIN_KEYS_PER_CRYPT][256]; /* 2048 bits, yes */\n\t\tint i;\n\n\t\tif (cur_salt->version == 1) {\n#ifdef SIMD_COEF_32\n\t\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\t\tfor (i = 0; i < inc; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[index + i]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[index + i];\n\t\t\t\tpout[i] = pkey[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, cur_salt->salt_length,\n\t\t\t cur_salt->iterations, pout, 136 /* 256 */, 0);\n#else\n\t\t\tfor (i = 0; i < inc; i++) {\n\t\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]),\n\t\t\t\t cur_salt->salt, cur_salt->salt_length, cur_salt->iterations,\n\t\t\t\t pkey[i], 136, 0);\n\t\t\t}\n\n\t\t} else { /* (cur_salt->version == 2) */\n#ifdef SIMD_COEF_64\n\t\t\tint 
len[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char pbkdf2_key[MIN_KEYS_PER_CRYPT][64];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\t\tfor (i = 0; i < inc; i++) {\n\t\t\t\tSHA512_CTX ctx;\n\n\t\t\t\tSHA512_Init(&ctx);\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)cur_salt->salt, cur_salt->salt_length);\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)saved_key[index + i], strlen(saved_key[index + i]));\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)cur_salt->salt, cur_salt->salt_length);\n\t\t\t\tSHA512_Final(pbkdf2_key[i], &ctx);\n\n\t\t\t\tlen[i] = 64;\n\t\t\t\tpin[i] = pbkdf2_key[i];\n\t\t\t\tpout[i] = pkey[i];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned char **)pin, len, cur_salt->salt, cur_salt->salt_length,\n\t\t\t cur_salt->iterations, pout, 136 /* 256 */, 0);\n#else\n\t\t\tfor (i = 0; i < inc; i++) {\n\t\t\t\tunsigned char pbkdf2_key[64];\n\t\t\t\tSHA512_CTX ctx;\n\n\t\t\t\tSHA512_Init(&ctx);\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)cur_salt->salt, cur_salt->salt_length);\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)saved_key[index + i], strlen(saved_key[index + i]));\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)cur_salt->salt, cur_salt->salt_length);\n\t\t\t\tSHA512_Final(pbkdf2_key, &ctx);\n\n\t\t\t\tpbkdf2_sha512(pbkdf2_key, 64, cur_salt->salt, cur_salt->salt_length,\n\t\t\t\t cur_salt->iterations, pkey[i], 136, 0);\n\t\t\t}\n\n\t\t}\n\n\t\tfor (i = 0; i < inc; i++) {\n\t\t\tif (telegram_check_password(pkey[i], cur_salt)) {\n\t\t\t\tcracked[index + i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ike_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tcompute_hash(cur_salt, saved_key[index], (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/securezip_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (securezip_decrypt(cur_salt, saved_key[index])) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5pa-md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pcount;\n\tconst unsigned char one[] = { 1, 0, 0, 0 };\n\tint i;\n\n\tif (!keys_prepared) {\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tint len;\n\t\t\tunsigned char K[KEY_SIZE];\n\t\t\tunsigned char K1[KEY_SIZE];\n\t\t\t// K = MD4(UTF-16LE(password)), ordinary 16-byte NTLM hash\n\t\t\tlen = E_md4hash((unsigned char *) saved_plain[i], saved_len[i], K);\n\n\t\t\tif (len <= 0)\n\t\t\t\t((char*)(saved_plain[i]))[-len] = 0;\t// match truncation\n\n\t\t\t// K1 = HMAC-MD5(K, 1)\n\t\t\t// 1 is encoded as little endian in 4 bytes (0x01000000)\n\t\t\thmac_md5(K, (unsigned char *) &one, 4, K1);\n\n\t\t\t// We do key setup of the next HMAC_MD5 here. 
rest in inner loop\n\t\t\thmac_md5_init_K16(K1, &saved_ctx[i]);\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5pa-md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " in inner loop\n\t\t\thmac_md5_init_K16(K1, &saved_ctx[i]);\n\t\t}\n\t\tkeys_prepared = 1;\n\t}\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tunsigned char K3[KEY_SIZE], cleartext[TIMESTAMP_SIZE];\n\t\tHMACMD5Context ctx;\n\t\t// key set up with K1 is stored in saved_ctx[i]\n\n\t\t// K3 = HMAC-MD5(K1, CHECKSUM)\n\t\tmemcpy(&ctx, &saved_ctx[i], sizeof(ctx));\n\t\thmac_md5_update((unsigned char*)cur_salt->checksum,\n\t\t CHECKSUM_SIZE, &ctx);\n\t\thmac_md5_final(K3, &ctx);\n\n\t\t// Decrypt part of the timestamp with the derived key K3\n\t\tRC4_single(K3, KEY_SIZE, cur_salt->timestamp, 16, cleartext);\n\n\t\t// Bail out unless we see known plaintext\n\t\tif (cleartext[14] == '2' && cleartext[15] == '0') {\n\t\t\t// Decrypt the rest of the timestamp\n\t\t\tRC4_single(K3, KEY_SIZE, cur_salt->timestamp,\n\t\t\t TIMESTAMP_SIZE, cleartext);\n\t\t\tif (cleartext[28] == 'Z') {\n\t\t\t\t// create checksum K2 = HMAC-MD5(K1, plaintext)\n\t\t\t\tmemcpy(&ctx, &saved_ctx[i], sizeof(ctx));\n\t\t\t\thmac_md5_update(cleartext, TIMESTAMP_SIZE, &ctx);\n\t\t\t\thmac_md5_final((unsigned char*)output[i], &ctx);\n\t\t\t}\n\t\t} else {\n\t\t\toutput[i][0] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sha256crypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "x = 0; index < count; ++index)\n\t\tMixOrder[index] = index;\n\ttot_todo = count;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += MIN_KEYS_PER_CRYPT)\n\t{\n\t\t// portably align temp_result char * pointer machine word size.\n\t\tunion xx {\n\t\t\tunsigned char c[BINARY_SIZE];\n\t\t\tARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];\n\t\t} u;\n\t\tunsigned char *temp_result = u.c;\n\t\tSHA256_CTX ctx;\n\t\tSHA256_CTX alt_ctx;\n\t\tsize_t cnt;\n\t\tint idx;\n\t\tchar *cp;\n\t\tchar p_bytes[PLAINTEXT_LENGTH+1];\n\t\tchar s_bytes[PLAINTEXT_LENGTH+1];\n\t\tchar tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];\n\t\tcryptloopstruct *crypt_struct;\n#ifdef SIMD_COEF_32\n\t\tchar tmp_sse_out[8*MIN_KEYS_PER_CRYPT*4+MEM_ALIGN_SIMD];\n\t\tuint32_t *sse_out;\n\t\tsse_out = (uint32_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);\n\n\t\tcrypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);\n\n\t\tfor (idx = 0; idx < MIN_KEYS_PER_CRYPT; ++idx)\n\t\t{\n\t\t\t/* Prepare for the real work. */\n\t\t\tSHA256_Init(&ctx);\n\n\t\t\t/* Add the key string. */\n\t\t\tSHA256_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* The last part is the salt string. This must be at most 16\n\t\t\t characters and it ends at the first `$' character (for\n\t\t\t compatibility with existing implementations). */\n\t\t\tSHA256_Update(&ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Compute alternate SHA256 sum with input KEY, SALT, and KEY. The\n\t\t\t final result will be added to the first context. */\n\t\t\tSHA256_Init(&alt_ctx);\n\n\t\t\t/* Add key. */\n\t\t\tSHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Add salt. */\n\t\t\tSHA256_Update(&alt_ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Add key again. 
*/\n\t\t\tSHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Now get result of this (32 bytes) and add it to the other\n\t\t\t context. */\n\t\t\tSHA256_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);\n\n\t\t\t/* Add for any character in the key one byte of the alternate sum. */\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tSHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);\n\t\t\tSHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);\n\n\t\t\t/* Take the binary representation of the length of the key and for every\n\t\t\t 1 add the alternate sum, for every 0 the key. */\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)\n\t\t\t\tif ((cnt & 1) != 0)\n\t\t\t\t\tSHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);\n\t\t\t\telse\n\t\t\t\t\tSHA256_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Create intermediate result. */\n\t\t\tSHA256_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);\n\n\t\t\t/* Start computation of P byte sequence. */\n\t\t\tSHA256_Init(&alt_ctx);\n\n\t\t\t/* For every character in the password add the entire password. */\n\t\t\tfor (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)\n\t\t\t\tSHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Finish the digest. */\n\t\t\tSHA256_Final(temp_result, &alt_ctx);\n\n\t\t\t/* Create byte sequence P. */\n\t\t\tcp = p_bytes;\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tcp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;\n\t\t\tmemcpy (cp, temp_result, cnt);\n\n\t\t\t/* Start computation of S byte sequence. */\n\t\t\tSHA256_Init(&alt_ctx);\n\n\t\t\t/* repeat the following 16+A[0] times, where A[0] represents the\n\t\t\t first byte in digest A interpreted as an 8-bit unsigned value */\n\t\t\tfor (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)\n\t\t\t\tSHA256_Update(&alt_ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Finish the digest. */\n\t\t\tSHA256_Final(temp_result, &alt_ctx);\n\n\t\t\t/* Create byte sequence S. */\n\t\t\tcp = s_bytes;\n\t\t\tfor (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tcp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;\n\t\t\tmemcpy (cp, temp_result, cnt);\n\n\t\t\t/* Repeatedly run the collected hash value through SHA256 to\n\t\t\t burn CPU cycles. 
*/\n\t\t\tLoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);\n\t\t}\n\n\t\tidx = 0;\n#ifdef SIMD_COEF_32\n\t\tfor (cnt = 1; ; ++cnt) {\n\t\t\tif (crypt_struct->datlen[idx]==128) {\n\t\t\t\tunsigned char *cp = crypt_struct->bufs[0][idx];\n\t\t\t\tSIMDSHA256body(cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);\n\t\t\t\tSIMDSHA256body(&cp[64], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);\n\t\t\t} else {\n\t\t\t\tunsigned char *cp = crypt_struct->bufs[0][idx];\n\t\t\t\tSIMDSHA256body(cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);\n\t\t\t}\n\t\t\tif (cnt == cur_salt->rounds)\n\t\t\t\tbreak;\n\t\t\t{\n\t\t\t\tunsigned int j, k;\n\t\t\t\tfor (k = 0; k < MIN_KEYS_PER_CRYPT; ++k) {\n\t\t\t\t\tuint32_t *o = (uint32_t *)crypt_struct->cptr[k][idx];\n#if !ARCH_ALLOWS_UNALIGNED\n\t\t\t\t\tif (!is_aligned(o, 4)) {\n\t\t\t\t\t\tunsigned char *cp = (unsigned char*)o;\n\t\t\t\t\t\tfor (j = 0; j < 32; ++j)\n\t\t\t\t\t\t\t*cp++ = ((unsigned char*)sse_out)[GETPOS(j, k)];\n\t\t\t\t\t} else\n\n\n\t\t\t\t\tfor (j = 0; j < 8; ++j)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\t\t*o++ = JOHNSWAP(sse_out[(j*SIMD_COEF_32)+(k&(SIMD_COEF_32-1))+k/SIMD_COEF_32*8*SIMD_COEF_32]);\n#else\n\t\t\t\t\t\t*o++ = sse_out[(j*SIMD_COEF_32)+(k&(SIMD_COEF_32-1))+k/SIMD_COEF_32*8*SIMD_COEF_32];\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (++idx == 42)\n\t\t\t\tidx = 0;\n\t\t}\n\t\t{\n\t\t\tunsigned int j, k;\n\t\t\tfor (k = 0; k < MIN_KEYS_PER_CRYPT; ++k) {\n\t\t\t\tuint32_t *o = (uint32_t *)crypt_out[MixOrder[index+k]];\n\t\t\t\tfor (j = 0; j < 8; ++j)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\t*o++ = JOHNSWAP(sse_out[(j*SIMD_COEF_32)+(k&(SIMD_COEF_32-1))+k/SIMD_COEF_32*8*SIMD_COEF_32]);\n#else\n\t\t\t\t\t*o++ = sse_out[(j*SIMD_COEF_32)+(k&(SIMD_COEF_32-1))+k/SIMD_COEF_32*8*SIMD_COEF_32];\n\n\t\t\t}\n\t\t}\n#else\n\t\tSHA256_Init(&ctx);\n\t\tfor (cnt = 1; ; ++cnt) {\n\t\t\t// calling with 64 byte, or 128 byte always, will force the update to properly crypt the data.\n\t\t\t// NOTE the data is fully formed. 
It ends in a 0x80, is padded with nulls, AND has bit appended.\n\t\t\tSHA256_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);\n\t\t\tif (cnt == cur_salt->rounds)\n\t\t\t\tbreak;\n#if ARCH_LITTLE_ENDIAN\n\t\t\t{\n\t\t\t\tint j;\n\t\t\t\tuint32_t *o = (uint32_t *)crypt_struct->cptr[0][idx];\n\t\t\t\tfor (j = 0; j < 8; ++j)\n\t\t\t\t\t*o++ = JOHNSWAP(ctx.h[j]);\n\t\t\t}\n#else\n\t\t\tmemcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);\n\n\t\t\tif (++idx == 42)\n\t\t\t\tidx = 0;\n\n\t\t\t// this memcpy is 'good enough', used instead of SHA256_Init()\n\t\t\tmemcpy(ctx.h, ctx_init, sizeof(ctx_init));\n\t\t}\n#if ARCH_LITTLE_ENDIAN\n\t\t{\n\t\t\tint j;\n\t\t\tuint32_t *o = (uint32_t *)crypt_out[MixOrder[index]];\n\t\t\tfor (j = 0; j < 8; ++j)\n\t\t\t\t*o++ = JOHNSWAP(ctx.h[j]);\n\t\t}\n#else\n\t\tmemcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);\n\n\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/fvde_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "> type == 2)\n\t\toutlen = 32;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, outlen, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], outlen, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tcracked[index+i] = fvde_decrypt(cur_salt, master[i]);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/zip_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "onst stuff like key_len being in the shared list,\n * while other versions demand they do. 
WAT!\n */\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tunsigned char pwd_ver[MIN_KEYS_PER_CRYPT][3 * BLK_SZ];\n\t\tint i, lens[MIN_KEYS_PER_CRYPT];\n\t\tint something_hit = 0, hits[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i + index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i + index];\n\t\t\tpout[i] = pwd_ver[i] + early_skip - late_skip;\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode),\n\t\t KEYING_ITERATIONS, pout, BLK_SZ, early_skip);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tif (!memcmp(pwd_ver[i] + (2 * key_len - late_skip), saved_salt->passverify, 2))\n\t\t\t\tsomething_hit = hits[i] = 1;\n\t\tif (something_hit) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tpout[i] = pwd_ver[i];\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode),\n\t\t\t KEYING_ITERATIONS, pout, late_size, late_skip);\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tif (hits[i]) {\n\t\t\t\t\thmac_sha1(pwd_ver[i] + key_len - late_skip, key_len,\n\t\t\t\t\t (const unsigned char*)saved_salt->datablob, saved_salt->comp_len,\n\t\t\t\t\t crypt_key[index+i], WINZIP_BINARY_SIZE);\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tmemset(crypt_key[index + i], 0, WINZIP_BINARY_SIZE);\n\t\t\t}\n\t\t} else {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tmemset(crypt_key[index + i], 0, WINZIP_BINARY_SIZE);\n\t\t}\n#else\n\t\tunsigned char pwd_ver[3 * BLK_SZ];\n\n\t\t/* Get the block that contains the two-byte verifier */\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]),\n\t\t saved_salt->salt, SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS,\n\t\t pwd_ver + early_skip - late_skip, BLK_SZ, early_skip);\n\n\t\t/* Early-rejection */\n\t\tif (!memcmp(pwd_ver + 2 * key_len - late_skip, saved_salt->passverify, 2)) {\n\n\t\t\t/* Get the remaining block(s) needed for the HMAC */\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]),\n\t\t\t saved_salt->salt, SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS,\n\t\t\t pwd_ver, late_size, late_skip);\n\n\t\t\thmac_sha1(pwd_ver + key_len - late_skip, key_len,\n\t\t\t (const unsigned char*)saved_salt->datablob, saved_salt->comp_len,\n\t\t\t crypt_key[index], WINZIP_BINARY_SIZE);\n\t\t}\n\t\telse\n\t\t\tmemset(crypt_key[index], 0, WINZIP_BINARY_SIZE);\n\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawKeccak_256_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tKeccak_HashInstance hash;\n\t\tKeccak_HashInitialize(&hash, 1088, 512, 256, 0x01);\n\t\tKeccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);\n\t\tKeccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " struct db_salt *salt)\n{\n\tint count = *pcount;\n#ifdef SIMD_COEF_32\n\tint i = 0;\n#if defined(_OPENMP)\nfor (i = 0; i < threads; i++) 
{\n\n\t\tunsigned int *in = &saved_key[i*NBKEYS*SHA_BUF_SIZ];\n\t\tunsigned int *out = &crypt_key[i*NBKEYS*BINARY_SIZE/4];\n\t\tunsigned int j;\n\t\tfor (j=0; j < NBKEYS; j++)\n\t\t\tin[(j&(SIMD_COEF_32-1)) + j/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = cur_salt;\n\t\tSIMDSHA1body(in, out, NULL, SSEi_MIXED_IN);\n#if defined(_OPENMP)\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, ctx_salt, saved_key, saved_len, crypt_out)", "context_chars": 100, "text": "HA1body(in, out, NULL, SSEi_MIXED_IN);\n#if defined(_OPENMP)\n\t}\n#endif\n#else\n\tint i;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tSHA_CTX ctx;\n\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n\n\t\tSHA1_Update(&ctx, saved_key[i], saved_len[i]);\n\t\tSHA1_Final((unsigned char *)(crypt_out[i]), &ctx);\n\t} #pragma omp parallel for default(none) private(i) shared(count, ctx_salt, saved_key, saved_len, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2-hmac-sha1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA1\n\t\tint lens[SSE_GROUP_SZ_SHA1], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA1];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA1];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, &(x.poutc),\n\t\t PBKDF2_SHA1_BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_sha1((const unsigned char*)(saved_key[index]),\n\t\t strlen(saved_key[index]),\n\t\t cur_salt->salt, cur_salt->length,\n\t\t cur_salt->rounds, (unsigned char*)crypt_out[index],\n\t\t PBKDF2_SHA1_BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/panama_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_panama_context ctx;\n\n\t\tsph_panama_init(&ctx);\n\t\tsph_panama(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_panama_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ethereum_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\tint failed = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n\t\tif (cur_salt->type == 0) {\n#ifdef SIMD_COEF_32\n\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\tpin[i] = (unsigned 
char*)saved_key[index+i];\n\t\t\t\tpout[i] = master[i];\n\t\t\t}\n\t\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i],\n\t\t\t\t\t\tstrlen(saved_key[index+i]),\n\t\t\t\t\t\tcur_salt->salt, cur_salt->saltlen,\n\t\t\t\t\t\tcur_salt->iterations, master[i], 32,\n\t\t\t\t\t\t0);\n\n\t\t} else if (cur_salt->type == 1) {\n#ifdef _OPENMP\n\t\t\tint t = omp_get_thread_num();\n\t\t\tif (t >= max_threads) {\n\t\t\t\tfailed = -1;\n\t\t\t\tcontinue;\n\t\t\t}\n#else\n\t\t\tconst int t = 0;\n\n\t\t\tyescrypt_params_t params = { .N = cur_salt->N, .r = cur_salt->r, .p = cur_salt->p };\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t\t\t (const uint8_t *)saved_key[index + i],\n\t\t\t\t strlen(saved_key[index + i]),\n\t\t\t\t (const uint8_t *)cur_salt->salt,\n\t\t\t\t strlen((const char *)cur_salt->salt),\n\t\t\t\t ¶ms,\n\t\t\t\t master[i], 32)) {\n\t\t\t\t\tfailed = errno ? errno : EINVAL;\n#ifndef _OPENMP\n\t\t\t\t\tgoto fail_with_errno;\n\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (cur_salt->type == 2) {\n\t\t\tif (new_keys) {\n\t\t\t\t/* Presale. No salt! */\n#ifdef SIMD_COEF_32\n\t\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\t\tint slens[MIN_KEYS_PER_CRYPT];\n\t\t\t\tunsigned char *sin[MIN_KEYS_PER_CRYPT];\n\t\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\t\t\tpout[i] = master[i];\n\t\t\t\t\tsin[i] = pin[i];\n\t\t\t\t\tslens[i] = lens[i];\n\t\t\t\t}\n\t\t\t\tpbkdf2_sha256_sse_varying_salt((const unsigned char**)pin, lens, sin, slens, 2000, pout, 16, 0);\n\t\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\t\tmemcpy(saved_presale[index + i], pout[i], 32);\n\t\t\t\t}\n#else\n\n\t\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), 2000, master[i], 16, 0);\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\t\tmemcpy(saved_presale[index + i], master[i], 32);\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\t\tmemcpy(master[i], saved_presale[index + i], 32);\n\t\t\t}\n\t\t}\n\n\t\tif (cur_salt->type == 0 || cur_salt->type == 1) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tKeccak_HashInstance hash;\n\t\t\t\tKeccak_HashInitialize(&hash, 1088, 512, 256, 0x01); // delimitedSuffix is 0x06 for SHA-3, and 0x01 for Keccak\n\t\t\t\tKeccak_HashUpdate(&hash, master[i] + 16, 16 * 8);\n\t\t\t\tKeccak_HashUpdate(&hash, cur_salt->ct, cur_salt->ctlen * 8);\n\t\t\t\tKeccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);\n\t\t\t}\n\t\t} else {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tAES_KEY akey;\n\t\t\t\tKeccak_HashInstance hash;\n\t\t\t\tunsigned char iv[16];\n\t\t\t\tunsigned char seed[4096];\n\t\t\t\tint padbyte;\n\t\t\t\tint datalen;\n\n\t\t\t\tAES_set_decrypt_key(master[i], 128, &akey);\n\t\t\t\tmemcpy(iv, cur_salt->encseed, 16);\n\t\t\t\tAES_cbc_encrypt(cur_salt->encseed + 16, seed, cur_salt->eslen - 16, &akey, iv, AES_DECRYPT);\n\t\t\t\tif (check_pkcs_pad(seed, cur_salt->eslen - 16, 16) < 0) {\n\t\t\t\t\tmemset(crypt_out[index+i], 0, 
BINARY_SIZE);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tpadbyte = seed[cur_salt->eslen - 16 - 1];\n\t\t\t\tdatalen = cur_salt->eslen - 16 - padbyte;\n\t\t\t\tif (datalen < 0) {\n\t\t\t\t\tmemset(crypt_out[index+i], 0, BINARY_SIZE);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tKeccak_HashInitialize(&hash, 1088, 512, 256, 0x01);\n\t\t\t\tKeccak_HashUpdate(&hash, seed, datalen * 8);\n\t\t\t\tKeccak_HashUpdate(&hash, dpad.data, 1 * 8);\n\t\t\t\tKeccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/episerver_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\n#ifdef SIMD_COEF_32\n\tfor (index = 0; index < count; index += (cur_salt->version == 0 ? NBKEYS_SHA1 : NBKEYS_SHA256)) {\n\t\tuint32_t *in = &saved_key[HASH_IDX_IN];\n\t\tuint32_t *out = &crypt_out[HASH_IDX_OUT];\n\n\t\tif (cur_salt->version == 0)\n\t\t\tSIMDSHA1body(in, out, NULL, SSEi_MIXED_IN);\n\t\telse //if (cur_salt->version == 1)\n\t\t\tSIMDSHA256body(in, out, NULL, SSEi_MIXED_IN);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pgpwde_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " count = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[40];\n\t\tint ret;\n\n\t\tS2KPGPWDE(saved_key[index], cur_salt->salt, key, 32);\n\t\tret = pgpwde_decrypt_and_verify(key, cur_salt->esk, 128);\n\t\tcracked[index] = (0 == ret);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/django_scrypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, failed, params, max_threads, local, saved_key, cur_salt, crypt_out)", "context_chars": 100, "text": "params_t params = { .N = 1ULL << cur_salt->N, .r = cur_salt->r, .p = cur_salt->p };\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t (const uint8_t *)saved_key[index],\n\t\t strlen(saved_key[index]),\n\t\t (const uint8_t *)cur_salt->salt,\n\t\t strlen(cur_salt->salt),\n\t\t ¶ms,\n\t\t (uint8_t *)crypt_out[index],\n\t\t sizeof(crypt_out[index]))) {\n\t\t\tfailed = errno ? 
errno : EINVAL;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\t} #pragma omp parallel for default(none) private(index) shared(count, failed, params, max_threads, local, saved_key, cur_salt, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wow_srp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint j;\n#ifdef _OPENMP\nfor (j = 0; j < count; ++j) {\n\t\tSHA_CTX ctx;\n\t\tunsigned char Tmp[20];\n\n\t\tmemset(crypt_out[j], 0, sizeof(crypt_out[j]));\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, user_id, strlen((char*)user_id));\n\t\tSHA1_Update(&ctx, \":\", 1);\n\t\tSHA1_Update(&ctx, saved_key[j], strlen(saved_key[j]));\n\t\tSHA1_Final(Tmp, &ctx);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, saved_salt, strlen((char*)saved_salt));\n\t\tSHA1_Update(&ctx, Tmp, 20);\n\t\tSHA1_Final(Tmp, &ctx);\n\t\t// Ok, now Tmp is v\n\n\t\t//if (!strcmp(saved_key[j], \"ENTERNOW__1\") && !strcmp((char*)user_id, \"DIP\")) {\n\t\t//\tprintf(\"salt=%s user=%s pass=%s, \", (char*)saved_salt, (char*)user_id, saved_key[j]);\n\t\t//\tdump_stuff_msg(\"sha$h \", Tmp, 20);\n\t\t//}\n\n#ifdef HAVE_LIBGMP\n\t{\n\t\tunsigned char HashStr[80], *p;\n\t\tint i, todo;\n\t\tp = HashStr;\n\t\tfor (i = 0; i < 20; ++i) {\n\t\t\t*p++ = itoa16[Tmp[i]>>4];\n\t\t\t*p++ = itoa16[Tmp[i]&0xF];\n\t\t}\n\t\t*p = 0;\n\n\t\tmpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);\n\t\tmpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );\n\t\tmpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);\n\n\t\tp = HashStr;\n\t\ttodo = strlen((char*)p);\n\t\tif (todo&1) {\n\t\t\t((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];\n\t\t\t++p;\n\t\t\t--todo;\n\t\t} else {\n\t\t\t((unsigned char*)(crypt_out[j]))[0] =\n\t\t\t\t(atoi16[ARCH_INDEX(*p)] << 4) |\n\t\t\t\tatoi16[ARCH_INDEX(p[1])];\n\t\t\tp += 2;\n\t\t\ttodo -= 2;\n\t\t}\n\t\ttodo >>= 1;\n\t\tfor (i = 1; i <= todo; i++) {\n\t\t\t((unsigned char*)(crypt_out[j]))[i] =\n\t\t\t\t(atoi16[ARCH_INDEX(*p)] << 4) |\n\t\t\t\tatoi16[ARCH_INDEX(p[1])];\n\t\t\tp += 2;\n\t\t}\n\t\t//if (!strcmp(saved_key[j], \"ENTERNOW__1\") && !strcmp((char*)user_id, \"DIP\")) {\n\t\t//\tdump_stuff_msg(\"crypt \", crypt_out[j], 32);\n\t\t//}\n\t}\n#else\n\t\t// using oSSL's BN to do expmod.\n\t\tpSRP_CTX[j].z_exp = BN_bin2bn(Tmp,20,pSRP_CTX[j].z_exp);\n\t\tBN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);\n\t\tBN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));\n\t\t//if (!strcmp(saved_key[j], \"ENTERNOW__1\") && !strcmp((char*)user_id, \"DIP\")) {\n\t\t//\tdump_stuff_msg(\"crypt \", crypt_out[j], 32);\n\t\t//}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pdf_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char output[32];\n\t\tpdf_compute_user_password((unsigned char*)saved_key[index], output);\n\t\tif (crypt_out->R == 2 || crypt_out->R == 5 || crypt_out->R == 6)\n\t\t\tif (memcmp(output, crypt_out->u, 32) == 0) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\tif (crypt_out->R == 3 || crypt_out->R 
== 4)\n\t\t\tif (memcmp(output, crypt_out->u, 16) == 0) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/salted_sha1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "unt = *pcount;\n\tint index;\n\tint inc = 1;\n\n#ifdef SIMD_COEF_32\n\tinc = NBKEYS;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n#ifdef SIMD_COEF_32\n\t\tunsigned int i;\n\n\t\tfor (i=0;idata.c, saved_salt->len);\n\t\tSHA1_Final( (unsigned char *)crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, key, md4hash)", "context_chars": 100, "text": "ly when the\n\n\t// now get NTLM of the password (MD4 of unicode)\n\tif (new_key) {\n#if defined(_OPENMP)\nfor (i = 0; i < count; ++i) {\n\t\t\tint utf16len;\n\t\t\tUTF16 pass_unicode[PLAINTEXT_LENGTH+1];\n\t\t\tMD4_CTX ctx;\n\t\t\tutf16len = enc_to_utf16(pass_unicode, PLAINTEXT_LENGTH, &key[(PLAINTEXT_LENGTH + 1)*i], strlen((char*)&key[(PLAINTEXT_LENGTH + 1)*i]));\n\t\t\tif (utf16len <= 0) {\n\t\t\t\tkey[(PLAINTEXT_LENGTH + 1)*i-utf16len] = 0;\n\t\t\t\tif (utf16len != 0)\n\t\t\t\t\tutf16len = strlen16(pass_unicode);\n\t\t\t}\n\t\t\tMD4_Init(&ctx);\n\t\t\tMD4_Update(&ctx, pass_unicode, utf16len<<1);\n\t\t\tMD4_Final(&md4hash[HASH_LEN*i], &ctx);\n\t\t} #pragma omp parallel for default(none) private(i) shared(count, key, md4hash)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "D4_Final(&md4hash[HASH_LEN*i], &ctx);\n\t\t}\n\t\tnew_key = 0;\n\t}\n\n#ifdef _OPENMP\n#if defined(WITH_UBSAN)\n#else\n#pragma omp parallel for default(none) shared(count, salt_buffer, salt_len, crypt_out, md4hash)\n\n\n\tfor (t1 = 0; t1 < count; t1 += MS_NUM_KEYS)\t{\n\t\tMD4_CTX ctx;\n\t\tint i;\n\t\tint t = t1 / MS_NUM_KEYS;\n\t\tfor (i = 0; i < MS_NUM_KEYS; ++i) {\n\t\t\t// Get DCC1. That is MD4( NTLM . unicode(lc username) )\n\t\t\tMD4_Init(&ctx);\n\t\t\tMD4_Update(&ctx, &md4hash[(t * MS_NUM_KEYS + i) * HASH_LEN], 16);\n\t\t\tMD4_Update(&ctx, salt_buffer, salt_len);\n\t\t\tMD4_Final((unsigned char*)&crypt_out[(t * MS_NUM_KEYS + i) * 4], &ctx);\n\t\t\t// now we have DCC1 (mscash) which is MD4 (MD4(unicode(pass)) . unicode(lc username))\n\n#ifndef SIMD_COEF_32\n\t\t\t// Non-SSE: Compute DCC2 one at a time\n\t\t\tpbkdf2(&crypt_out[(t * MS_NUM_KEYS + i) * 4]);\n\n\t\t}\n#ifdef SIMD_COEF_32\n\t\t// SSE: Compute DCC2 in parallel, once per thread\n\t\tpbkdf2_sse2(t);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(count, salt_buffer, salt_len, crypt_out, md4hash)", "context_chars": 100, "text": "&ctx);\n\t\t}\n\t\tnew_key = 0;\n\t}\n\n#ifdef _OPENMP\n#if defined(WITH_UBSAN)\n#pragma omp parallel for\n#else\nfor (t1 = 0; t1 < count; t1 += MS_NUM_KEYS)\t{\n\t\tMD4_CTX ctx;\n\t\tint i;\n\t\tint t = t1 / MS_NUM_KEYS;\n\t\tfor (i = 0; i < MS_NUM_KEYS; ++i) {\n\t\t\t// Get DCC1. That is MD4( NTLM . 
unicode(lc username) )\n\t\t\tMD4_Init(&ctx);\n\t\t\tMD4_Update(&ctx, &md4hash[(t * MS_NUM_KEYS + i) * HASH_LEN], 16);\n\t\t\tMD4_Update(&ctx, salt_buffer, salt_len);\n\t\t\tMD4_Final((unsigned char*)&crypt_out[(t * MS_NUM_KEYS + i) * 4], &ctx);\n\t\t\t// now we have DCC1 (mscash) which is MD4 (MD4(unicode(pass)) . unicode(lc username))\n\n#ifndef SIMD_COEF_32\n\t\t\t// Non-SSE: Compute DCC2 one at a time\n\t\t\tpbkdf2(&crypt_out[(t * MS_NUM_KEYS + i) * 4]);\n\n\t\t}\n#ifdef SIMD_COEF_32\n\t\t// SSE: Compute DCC2 in parallel, once per thread\n\t\tpbkdf2_sse2(t);\n\n\t} #pragma omp parallel for default(none) shared(count, salt_buffer, salt_len, crypt_out, md4hash)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pgpsda_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[SHA1_DIGEST_LENGTH];\n\n\t\tCAST_KEY ck;\n\n\t\tpgpsda_kdf(saved_key[index], cur_salt->salt, key);\n\t\tCAST_set_key(&ck, 16, key);\n\t\tmemset((unsigned char*)crypt_out[index], 0, BINARY_SIZE);\n\t\tCAST_ecb_encrypt(key, (unsigned char*)crypt_out[index], &ck, CAST_ENCRYPT);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/geli_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];\n\t\tunsigned char key[MIN_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];\n\t\tint i;\n#ifdef SIMD_COEF_64\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, pout, G_ELI_USERKEYLEN, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, master[i], G_ELI_USERKEYLEN, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tJTR_hmac_sha512((const unsigned char*)\"\", 0, master[i], G_ELI_USERKEYLEN, key[i], G_ELI_USERKEYLEN);\n\t\t\tcracked[index+i] = geli_decrypt_verify(cur_salt, key[i]);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/net_ah_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tHMACMD5Context ctx;\n\n\t\thmac_md5_init_rfc2104((const unsigned char*)saved_key[index], strlen(saved_key[index]), &ctx);\n\t\thmac_md5_update(cur_salt->salt, cur_salt->length, &ctx);\n\t\thmac_md5_final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/hmacMD5_fmt_plug.c", "omp_pragma_line": "#pragma omp 
parallel for", "context_chars": 100, "text": "crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#if _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tint i;\n\n\t\tif (new_keys) {\n\t\t\tSIMDmd5body(&ipad[index * PAD_SIZE],\n\t\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t\tSIMDmd5body(&opad[index * PAD_SIZE],\n\t\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t}\n\t\tSIMDmd5body(cur_salt->salt[0],\n\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n\t\tfor (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++) {\n\t\t\tSIMDmd5body(cur_salt->salt[i],\n\t\t\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t\t\t SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT);\n\t\t}\n\t\tSIMDmd5body(&crypt_key[index * PAD_SIZE],\n\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n#else\n\tMD5_CTX ctx;\n\n\t\tif (new_keys) {\n\t\t\tMD5_Init(&ipad_ctx[index]);\n\t\t\tMD5_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\tMD5_Init(&opad_ctx[index]);\n\t\t\tMD5_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t}\n\n\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\tMD5_Update(&ctx, cur_salt, strlen((char*)cur_salt));\n\t\tMD5_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\tMD5_Update(&ctx, crypt_key[index], BINARY_SIZE);\n\t\tMD5_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ospf_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tint plen = strlen(saved_key[index]);\n\t\tunsigned char key[64];\n\t\tunsigned char out[64];\n\n\t\tif (cur_salt->type == 1) {\n\t\t\tSHA_CTX ctx;\n\n\t\t\t// process password according to rfc5709\n\t\t\tif (plen < SHA_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], plen);\n\t\t\t\tmemset(key + plen, 0, SHA_DIGEST_LENGTH - plen);\n\t\t\t} else if (plen == SHA_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], SHA_DIGEST_LENGTH);\n\t\t\t} else {\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, saved_key[index], plen);\n\t\t\t\tSHA1_Final(key, &ctx);\n\t\t\t}\n\t\t\t// salt already has ospf_apad appended\n\t\t\thmac_sha1(key, 20, cur_salt->salt, cur_salt->salt_length + SHA_DIGEST_LENGTH, out, 16);\n\t\t\tmemcpy((unsigned char*)crypt_out[index], out, 16);\n\t\t} else if (cur_salt->type == 2) {\n\t\t\tSHA256_CTX ctx;\n\n\t\t\tif (plen < SHA256_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], plen);\n\t\t\t\tmemset(key + plen, 0, SHA256_DIGEST_LENGTH - plen);\n\t\t\t} else if (plen == SHA256_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], SHA256_DIGEST_LENGTH);\n\t\t\t} else {\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, saved_key[index], plen);\n\t\t\t\tSHA256_Final(key, &ctx);\n\t\t\t}\n\t\t\thmac_sha256(key, 32, cur_salt->salt, cur_salt->salt_length + SHA256_DIGEST_LENGTH, out, 16);\n\t\t\tmemcpy((unsigned 
char*)crypt_out[index], out, 16);\n\t\t} else if (cur_salt->type == 3) {\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tif (plen < SHA384_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], plen);\n\t\t\t\tmemset(key + plen, 0, SHA384_DIGEST_LENGTH - plen);\n\t\t\t} else if (plen == SHA384_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], SHA384_DIGEST_LENGTH);\n\t\t\t} else {\n\t\t\t\tSHA384_Init(&ctx);\n\t\t\t\tSHA384_Update(&ctx, saved_key[index], plen);\n\t\t\t\tSHA384_Final(key, &ctx);\n\t\t\t}\n\t\t\thmac_sha384(key, 48, cur_salt->salt, cur_salt->salt_length + SHA384_DIGEST_LENGTH, out, 16);\n\t\t\tmemcpy((unsigned char*)crypt_out[index], out, 16);\n\t\t} else if (cur_salt->type == 4) {\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tif (plen < SHA512_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], plen);\n\t\t\t\tmemset(key + plen, 0, SHA512_DIGEST_LENGTH - plen);\n\t\t\t} else if (plen == SHA512_DIGEST_LENGTH) {\n\t\t\t\tmemcpy(key, saved_key[index], SHA512_DIGEST_LENGTH);\n\t\t\t} else {\n\t\t\t\tSHA512_Init(&ctx);\n\t\t\t\tSHA512_Update(&ctx, saved_key[index], plen);\n\t\t\t\tSHA512_Final(key, &ctx);\n\t\t\t}\n\t\t\thmac_sha512(key, 64, cur_salt->salt, cur_salt->salt_length + SHA512_DIGEST_LENGTH, out, 16);\n\t\t\tmemcpy((unsigned char*)crypt_out[index], out, 16);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawSHA256_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tSIMDSHA256body(&saved_key[(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32],\n\t\t &crypt_out[(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32],\n\t\t NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);\n#else\n\t\tSHA256_CTX ctx;\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA256_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sha512crypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "x = 0; index < count; ++index)\n\t\tMixOrder[index] = index;\n\ttot_todo = count;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < tot_todo; index += MIN_KEYS_PER_CRYPT)\n\t{\n\t\t// portably align temp_result char * pointer machine word size.\n\t\tunion xx {\n\t\t\tunsigned char c[BINARY_SIZE];\n\t\t\tARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];\n\t\t} u;\n\t\tunsigned char *temp_result = u.c;\n\t\tSHA512_CTX ctx;\n\t\tSHA512_CTX alt_ctx;\n\t\tsize_t cnt;\n\t\tint idx;\n\t\tchar *cp;\n\t\tchar p_bytes[PLAINTEXT_LENGTH+1];\n\t\tchar s_bytes[PLAINTEXT_LENGTH+1];\n\t\tchar tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];\n\t\tcryptloopstruct *crypt_struct;\n#ifdef SIMD_COEF_64\n\t\tchar tmp_sse_out[8*MIN_KEYS_PER_CRYPT*8+MEM_ALIGN_SIMD];\n\t\tuint64_t *sse_out;\n\t\tsse_out = (uint64_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);\n\n\t\tcrypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);\n\n\t\tfor (idx = 0; idx < MIN_KEYS_PER_CRYPT; ++idx)\n\t\t{\n\t\t\t/* Prepare for the real work. */\n\t\t\tSHA512_Init(&ctx);\n\n\t\t\t/* Add the key string. */\n\t\t\tSHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* The last part is the salt string. 
This must be at most 16\n\t\t\t characters and it ends at the first `$' character (for\n\t\t\t compatibility with existing implementations). */\n\t\t\tSHA512_Update(&ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The\n\t\t\t final result will be added to the first context. */\n\t\t\tSHA512_Init(&alt_ctx);\n\n\t\t\t/* Add key. */\n\t\t\tSHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Add salt. */\n\t\t\tSHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Add key again. */\n\t\t\tSHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Now get result of this (64 bytes) and add it to the other\n\t\t\t context. */\n\t\t\tSHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);\n\n\t\t\t/* Add for any character in the key one byte of the alternate sum. */\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);\n\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);\n\n\t\t\t/* Take the binary representation of the length of the key and for every\n\t\t\t 1 add the alternate sum, for every 0 the key. */\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)\n\t\t\t\tif ((cnt & 1) != 0)\n\t\t\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);\n\t\t\t\telse\n\t\t\t\t\tSHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Create intermediate result. */\n\t\t\tSHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);\n\n\t\t\t/* Start computation of P byte sequence. */\n\t\t\tSHA512_Init(&alt_ctx);\n\n\t\t\t/* For every character in the password add the entire password. */\n\t\t\tfor (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)\n\t\t\t\tSHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);\n\n\t\t\t/* Finish the digest. */\n\t\t\tSHA512_Final(temp_result, &alt_ctx);\n\n\t\t\t/* Create byte sequence P. */\n\t\t\tcp = p_bytes;\n\t\t\tfor (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tcp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;\n\t\t\tmemcpy (cp, temp_result, cnt);\n\n\t\t\t/* Start computation of S byte sequence. */\n\t\t\tSHA512_Init(&alt_ctx);\n\n\t\t\t/* repeat the following 16+A[0] times, where A[0] represents the\n\t\t\t first byte in digest A interpreted as an 8-bit unsigned value */\n\t\t\tfor (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)\n\t\t\t\tSHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);\n\n\t\t\t/* Finish the digest. */\n\t\t\tSHA512_Final(temp_result, &alt_ctx);\n\n\t\t\t/* Create byte sequence S. */\n\t\t\tcp = s_bytes;\n\t\t\tfor (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)\n\t\t\t\tcp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;\n\t\t\tmemcpy (cp, temp_result, cnt);\n\n\t\t\t/* Repeatedly run the collected hash value through SHA512 to\n\t\t\t burn CPU cycles. 
*/\n\t\t\tLoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);\n\t\t}\n\n\t\tidx = 0;\n#ifdef SIMD_COEF_64\n\t\tfor (cnt = 1; ; ++cnt) {\n\t\t\tif (crypt_struct->datlen[idx]==256) {\n\t\t\t\tunsigned char *cp = crypt_struct->bufs[0][idx];\n\t\t\t\tSIMDSHA512body(cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);\n\t\t\t\tSIMDSHA512body(&cp[128], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);\n\t\t\t} else {\n\t\t\t\tunsigned char *cp = crypt_struct->bufs[0][idx];\n\t\t\t\tSIMDSHA512body(cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);\n\t\t\t}\n\t\t\tif (cnt == cur_salt->rounds)\n\t\t\t\tbreak;\n\t\t\t{\n\t\t\t\tunsigned int j, k;\n\t\t\t\tfor (k = 0; k < MIN_KEYS_PER_CRYPT; ++k) {\n\t\t\t\t\tuint64_t *o = (uint64_t *)crypt_struct->cptr[k][idx];\n#if !ARCH_ALLOWS_UNALIGNED\n\t\t\t\t\tif (!is_aligned(o, 8)) {\n\t\t\t\t\t\tunsigned char *cp = (unsigned char*)o;\n\t\t\t\t\t\tfor (j = 0; j < 64; ++j)\n\t\t\t\t\t\t\t*cp++ = ((unsigned char*)sse_out)[GETPOS(j, k)];\n\t\t\t\t\t} else\n\n\t\t\t\t\tfor (j = 0; j < 8; ++j)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\t\t*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);\n#else\n\t\t\t\t\t\t*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (++idx == 42)\n\t\t\t\tidx = 0;\n\t\t}\n\t\t{\n\t\t\tunsigned int j, k;\n\t\t\tfor (k = 0; k < MIN_KEYS_PER_CRYPT; ++k) {\n\t\t\t\tuint64_t *o = (uint64_t *)crypt_out[MixOrder[index+k]];\n\t\t\t\tfor (j = 0; j < 8; ++j)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\t\t*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);\n#else\n\t\t\t\t\t*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];\n\n\t\t\t}\n\t\t}\n#else\n\t\tSHA512_Init(&ctx);\n\t\tfor (cnt = 1; ; ++cnt) {\n\t\t\t// calling with 128 byte, or 256 byte always, will force the update to properly crypt the data.\n\t\t\t// NOTE the data is fully formed. 
It ends in a 0x80, is padded with nulls, AND has bit appended.\n\t\t\tSHA512_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);\n\t\t\tif (cnt == cur_salt->rounds)\n\t\t\t\tbreak;\n#if ARCH_LITTLE_ENDIAN\n\t\t\t{\n\t\t\t\tint j;\n\t\t\t\tuint64_t *o = (uint64_t *)crypt_struct->cptr[0][idx];\n\t\t\t\tfor (j = 0; j < 8; ++j)\n\t\t\t\t\t*o++ = JOHNSWAP64(ctx.h[j]);\n\t\t\t}\n#else\n\t\t\tmemcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);\n\n\t\t\tif (++idx == 42)\n\t\t\t\tidx = 0;\n\n\t\t\t// this memcpy is 'good enough', used instead of SHA512_Init()\n\t\t\tmemcpy(ctx.h, ctx_init, sizeof(ctx_init));\n\t\t}\n#if ARCH_LITTLE_ENDIAN\n\t\t{\n\t\t\tint j;\n\t\t\tuint64_t *o = (uint64_t *)crypt_out[MixOrder[index]];\n\t\t\tfor (j = 0; j < 8; ++j)\n\t\t\t\t*o++ = JOHNSWAP64(ctx.h[j]);\n\t\t}\n#else\n\t\tmemcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);\n\n\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/chap_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, &cur_salt->id, 1);\n\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Update(&ctx, cur_salt->challenge, cur_salt->challenge_length);\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rsvp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char buf[64];\n\n\t\tif (cur_salt->type == 1) {\n\t\t\tMD5_CTX ctx;\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[64];\n\n\t\t\t\tif (len > 64) {\n\t\t\t\t\tMD5_Init(&ctx);\n\t\t\t\t\tMD5_Update(&ctx, p, len);\n\t\t\t\t\tMD5_Final(buf, &ctx);\n\t\t\t\t\tlen = 16;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tMD5_Init(&ipad_mctx[index]);\n\t\t\t\tMD5_Update(&ipad_mctx[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tMD5_Update(&ipad_mctx[index], ipad_constant_block, 64-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tMD5_Init(&opad_mctx[index]);\n\t\t\t\tMD5_Update(&opad_mctx[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tMD5_Update(&opad_mctx[index], opad_constant_block, 64-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_mctx[index], sizeof(ctx));\n\t\t\tMD5_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tMD5_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, &opad_mctx[index], sizeof(ctx));\n\t\t\tMD5_Update(&ctx, buf, 16);\n\t\t\tMD5_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t} else if (cur_salt->type == 2) {\n\t\t\tSHA_CTX ctx;\n\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[64];\n\n\t\t\t\tif (len > 64) {\n\t\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\t\tSHA1_Update(&ctx, p, len);\n\t\t\t\t\tSHA1_Final(buf, &ctx);\n\t\t\t\t\tlen 
= 20;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tSHA1_Init(&ipad_ctx[index]);\n\t\t\t\tSHA1_Update(&ipad_ctx[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA1_Update(&ipad_ctx[index], ipad_constant_block, 64-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tSHA1_Init(&opad_ctx[index]);\n\t\t\t\tSHA1_Update(&opad_ctx[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA1_Update(&opad_ctx[index], opad_constant_block, 64-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\t\tSHA1_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tSHA1_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\t\tSHA1_Update(&ctx, buf, 20);\n\t\t\t// NOTE, this writes 20 bytes. That is why we had to bump up the size of each crypt_out[] value,\n\t\t\t// even though we only look at the first 16 bytes when comparing the saved binary.\n\t\t\tSHA1_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t} else if (cur_salt->type == 3) {\n\t\t\tSHA256_CTX ctx;\n\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[64];\n\n\t\t\t\tif (len > 64) {\n\t\t\t\t\tSHA224_Init(&ctx);\n\t\t\t\t\tSHA224_Update(&ctx, p, len);\n\t\t\t\t\tSHA224_Final(buf, &ctx);\n\t\t\t\t\tlen = 28;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tSHA224_Init(&ipad_ctx_224[index]);\n\t\t\t\tSHA224_Update(&ipad_ctx_224[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA224_Update(&ipad_ctx_224[index], ipad_constant_block, 64-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tSHA224_Init(&opad_ctx_224[index]);\n\t\t\t\tSHA224_Update(&opad_ctx_224[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA224_Update(&opad_ctx_224[index], opad_constant_block, 64-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_ctx_224[index], sizeof(ctx));\n\t\t\tSHA224_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tSHA224_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, &opad_ctx_224[index], sizeof(ctx));\n\t\t\tSHA224_Update(&ctx, buf, 28);\n\t\t\tSHA224_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t} else if (cur_salt->type == 4) {\n\t\t\tSHA256_CTX ctx;\n\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[64];\n\n\t\t\t\tif (len > 64) {\n\t\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\t\tSHA256_Update(&ctx, p, len);\n\t\t\t\t\tSHA256_Final(buf, &ctx);\n\t\t\t\t\tlen = 32;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tSHA256_Init(&ipad_ctx_256[index]);\n\t\t\t\tSHA256_Update(&ipad_ctx_256[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA256_Update(&ipad_ctx_256[index], ipad_constant_block, 64-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tSHA256_Init(&opad_ctx_256[index]);\n\t\t\t\tSHA256_Update(&opad_ctx_256[index], pad, len);\n\t\t\t\tif (len < 64)\n\t\t\t\t\tSHA256_Update(&opad_ctx_256[index], opad_constant_block, 64-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_ctx_256[index], sizeof(ctx));\n\t\t\tSHA256_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tSHA256_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, 
&opad_ctx_256[index], sizeof(ctx));\n\t\t\tSHA256_Update(&ctx, buf, 32);\n\t\t\tSHA256_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t} else if (cur_salt->type == 5) {\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[128];\n\n\t\t\t\tif (len > 128) {\n\t\t\t\t\tSHA384_Init(&ctx);\n\t\t\t\t\tSHA384_Update(&ctx, p, len);\n\t\t\t\t\tSHA384_Final(buf, &ctx);\n\t\t\t\t\tlen = 48;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tSHA384_Init(&ipad_ctx_384[index]);\n\t\t\t\tSHA384_Update(&ipad_ctx_384[index], pad, len);\n\t\t\t\tif (len < 128)\n\t\t\t\t\tSHA384_Update(&ipad_ctx_384[index], ipad_constant_block, 128-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tSHA384_Init(&opad_ctx_384[index]);\n\t\t\t\tSHA384_Update(&opad_ctx_384[index], pad, len);\n\t\t\t\tif (len < 128)\n\t\t\t\t\tSHA384_Update(&opad_ctx_384[index], opad_constant_block, 128-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_ctx_384[index], sizeof(ctx));\n\t\t\tSHA384_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tSHA384_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, &opad_ctx_384[index], sizeof(ctx));\n\t\t\tSHA384_Update(&ctx, buf, 48);\n\t\t\tSHA384_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t} else if (cur_salt->type == 6) {\n\t\t\tSHA512_CTX ctx;\n\n\t\t\tif (new_keys[cur_salt->type]) {\n\t\t\t\tint i, len = strlen(saved_key[index]);\n\t\t\t\tunsigned char *p = (unsigned char*)saved_key[index];\n\t\t\t\tunsigned char pad[128];\n\n\t\t\t\tif (len > 128) {\n\t\t\t\t\tSHA512_Init(&ctx);\n\t\t\t\t\tSHA512_Update(&ctx, p, len);\n\t\t\t\t\tSHA512_Final(buf, &ctx);\n\t\t\t\t\tlen = 64;\n\t\t\t\t\tp = buf;\n\t\t\t\t}\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x36;\n\t\t\t\t}\n\t\t\t\tSHA512_Init(&ipad_ctx_512[index]);\n\t\t\t\tSHA512_Update(&ipad_ctx_512[index], pad, len);\n\t\t\t\tif (len < 128)\n\t\t\t\t\tSHA512_Update(&ipad_ctx_512[index], ipad_constant_block, 128-len);\n\t\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\t\tpad[i] = p[i] ^ 0x5C;\n\t\t\t\t}\n\t\t\t\tSHA512_Init(&opad_ctx_512[index]);\n\t\t\t\tSHA512_Update(&opad_ctx_512[index], pad, len);\n\t\t\t\tif (len < 128)\n\t\t\t\t\tSHA512_Update(&opad_ctx_512[index], opad_constant_block, 128-len);\n\t\t\t}\n\t\t\tmemcpy(&ctx, &ipad_ctx_512[index], sizeof(ctx));\n\t\t\tSHA512_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\tSHA512_Final(buf, &ctx);\n\t\t\tmemcpy(&ctx, &opad_ctx_512[index], sizeof(ctx));\n\t\t\tSHA512_Update(&ctx, buf, 64);\n\t\t\tSHA512_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/c3_fmt.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr)", "context_chars": 100, "text": "tatic int warned = 0;\n\tint count = *pcount;\n\tint index;\n\n#if defined(_OPENMP) && defined(__GLIBC__)\nfor (index = 0; index < count; index++) {\n\t\tchar *hash;\n\t\tint t = omp_get_thread_num();\n\t\tif (t < MAX_THREADS) {\n\t\t\tstruct crypt_data **data = &crypt_data[t];\n\t\t\tif (!*data) {\n/* Stagger the structs to reduce their competition for the same cache lines */\n\t\t\t\tsize_t mask = MEM_ALIGN_PAGE, shift = 0;\n\t\t\t\twhile (t) 
{\n\t\t\t\t\tmask >>= 1;\n\t\t\t\t\tif (mask < MEM_ALIGN_CACHE)\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tif (t & 1)\n\t\t\t\t\t\tshift += mask;\n\t\t\t\t\tt >>= 1;\n\t\t\t\t}\n\t\t\t\t*data = (void *)((char *)\n\t\t\t\t mem_alloc_tiny(sizeof(**data) +\n\t\t\t\t shift, MEM_ALIGN_PAGE) + shift);\n\t\t\t\tmemset(*data, 0, sizeof(**data));\n\t\t\t}\n\t\t\thash = crypt_r(saved_key[index], saved_salt, *data);\n\t\t} else { /* should not happen */\n\t\t\tstruct crypt_data data;\n\t\t\tmemset(&data, 0, sizeof(data));\n\t\t\thash = crypt_r(saved_key[index], saved_salt, &data);\n\t\t}\n\t\tif (!hash) {\n#pragma omp critical\n\t\t\tif (!warned) {\n\t\t\t\tfprintf(stderr,\n\t\t\t\t \"Warning: crypt_r() returned NULL\\n\");\n\t\t\t\twarned = 1;\n\t\t\t}\n\t\t\thash = \"\";\n\t\t}\n\t\tstrnzcpy(crypt_out[index], hash, BINARY_SIZE);\n\t} #pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/c3_fmt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "use for SHA-crypt and\n * SunMD5 hashes, which are not yet supported by non-jumbo John natively.\n */\n#else\n#undef FMT_OMP\n#define FMT_OMP 0\n\n\tfor (index = 0; index < count; index++) {\n\t\tchar *hash = crypt(saved_key[index], saved_salt);\n\t\tif (!hash) {\n#if defined(_OPENMP) && defined(__sun)\n#pragma omp critical\n\n\t\t\tif (!warned) {\n\t\t\t\tfprintf(stderr,\n\t\t\t\t \"Warning: crypt() returned NULL\\n\");\n\t\t\t\twarned = 1;\n\t\t\t}\n\t\t\thash = \"\";\n\t\t}\n\t\tstrnzcpy(crypt_out[index], hash, BINARY_SIZE);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/md5crypt_long_fmt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tcrypt_md5(saved_key[index], (char*)cur_salt->salt, cur_salt->is_standard, (char *)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sl3_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tunsigned int i;\n\n\t\tfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ntlmv1_mschapv2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_per_crypt = 0;\n\n#ifdef SIMD_COEF_32\n#if (BLOCK_LOOPS > 1)\n#if defined(_OPENMP) && defined(SSE_OMP)\nfor (i = 0; i < BLOCK_LOOPS; i++)\n\t\t\tSIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);\n#else\n\t\tSIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN);\n\n\t\tif (use_bitmap)\n\t\t\tfor (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {\n\t\t\t\tunsigned int value;\n\n\t\t\t\tvalue = *(uint32_t*)\n\t\t\t\t\t&nthash[GETOUTPOS_W32(3, i)] >> 16;\n\t\t\t\tcrypt_key[i] = value;\n#if defined(_OPENMP) && defined(SSE_OMP)\n#pragma omp atomic\n\n\t\t\t\tbitmap[value >> 5] |= 1U << (value & 0x1f);\n\t\t\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ntlmv1_mschapv2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\t\t\t\tcrypt_key[i] = *(uint32_t*)\n\t\t\t\t\t&nthash[GETOUTPOS_W32(3, i)] >> 16;\n\t\t\t}\n#else\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tMD4_CTX ctx;\n\n\t\t\tMD4_Init( &ctx );\n\t\t\tMD4_Update(&ctx, saved_key[i], saved_len[i]);\n\t\t\tMD4_Final((uchar*)&nthash[i * 16], &ctx);\n\n\t\t\tcrypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];\n\t\t\tif (use_bitmap) {\n\t\t\t\tunsigned int value = crypt_key[i];\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tbitmap[value >> 5] |= 1U << (value & 0x1f);\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wbb3_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char hexhash[40];\n\t\tSHA_CTX ctx;\n\n\t\tif (dirty) {\n\t\t\tunsigned char out[20];\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA1_Final(out, &ctx);\n\t\t\thex_encode(out, 20, hexhash1[index]);\n\t\t}\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, cur_salt->salt, 40);\n\t\tSHA1_Update(&ctx, hexhash1[index], 40);\n\t\tSHA1_Final((unsigned char*)crypt_out[index], &ctx);\n\t\thex_encode((unsigned char*)crypt_out[index], 20, hexhash);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, cur_salt->salt, 40);\n\t\tSHA1_Update(&ctx, hexhash, 40);\n\t\tSHA1_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ansible_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 32, 32);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 32, 32);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tJTR_hmac_sha256(master[i], 32, cur_salt->blob, cur_salt->bloblen, (unsigned char*)crypt_out[index+i], 16);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/radius_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (check_password(index, cur_salt)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 
1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pwsafe_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index+=MIN_KEYS_PER_CRYPT) {\n\t\tSHA256_CTX ctx;\n#ifdef SIMD_COEF_32\n\t\tunsigned int i;\n\t\tunsigned char _IBuf[64*MIN_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[32];\n\t\tuint32_t *keys32, j;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);\n\t\tkeys32 = (uint32_t*)keys;\n\t\tmemset(keys, 0, 64*MIN_KEYS_PER_CRYPT);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\tSHA256_Update(&ctx, cur_salt->salt, 32);\n\t\t\tSHA256_Final(tmpBuf, &ctx);\n\t\t\tfor (j = 0; j < 32; ++j)\n\t\t\t\tkeys[GETPOS(j, i)] = tmpBuf[j];\n\t\t\tkeys[GETPOS(j, i)] = 0x80;\n\t\t\t// 32 bytes of crypt data (0x100 bits).\n\t\t\tkeys[GETPOS(62, i)] = 0x01;\n\t\t}\n\t\tfor (i = 0; i < cur_salt->iterations; i++) {\n\t\t\tSIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);\n\t\t}\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);\n#else\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA256_Update(&ctx, cur_salt->salt, 32);\n\t\tSHA256_Final((unsigned char*)crypt_out[index], &ctx);\n#if 1\n\t\t// This complex crap only boosted speed on my quad-HT from 5016 to 5285.\n\t\t// A ton of complex code for VERY little gain. The SIMD change gave us\n\t\t// a 4x improvement with very little change. This pwsafe_sha256_iterate\n\t\t// does get 5% gain, but 400% is so much better, lol. 
I put the other\n\t\t// code in to be able to dump data out easier, getting dump_stuff()\n\t\t// data in flat, to be able to help get the SIMD code working.\n\t\tpwsafe_sha256_iterate(ctx.h, cur_salt->iterations);\n\t\tmemcpy(crypt_out[index], ctx.h, 32);\n#else\n\t\t{ int i;\n\t\tfor (i = 0; i <= cur_salt->iterations; ++i) {\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, (unsigned char*)crypt_out[index], 32);\n\t\t\tSHA256_Final((unsigned char*)crypt_out[index], &ctx);\n\t\t} }\n\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapH_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)", "context_chars": 100, "text": "key[index], BINARY_SIZE);\n}\n\nstatic void crypt_all_1(int count) {\n\tint idx=0;\n\n#if defined(_OPENMP)\nfor (idx = 0; idx < count; idx += NBKEYS1)\n\t{\n\t\tSHA_CTX ctx;\n\t\tuint32_t i;\n\n#if !defined (SIMD_COEF_32)\n\t\tuint32_t len = strlen(saved_plain[idx]);\n\t\tunsigned char tmp[PLAINTEXT_LENGTH+SHA1_BINARY_SIZE], *cp=&tmp[len];\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, saved_plain[idx], len);\n\t\tSHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\tstrcpy((char*)tmp, saved_plain[idx]);\n\t\tlen += SHA1_BINARY_SIZE;\n\t\tSHA1_Final(cp, &ctx);\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, tmp, len);\n\t\t\tSHA1_Final(cp, &ctx);\n\t\t}\n\t\tmemcpy(crypt_key[idx], cp, BINARY_SIZE);\n#else\n\t\tunsigned char _IBuf[64*NBKEYS1+MEM_ALIGN_SIMD], *keys, tmpBuf[20], _OBuf[20*NBKEYS1+MEM_ALIGN_SIMD], *crypt;\n\t\tuint32_t j, *crypt32, offs[NBKEYS1], len;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);\n\t\tcrypt32 = (uint32_t*)crypt;\n\t\tmemset(keys, 0, 64*NBKEYS1);\n\n\t\tfor (i = 0; i < NBKEYS1; ++i) {\n\t\t\tlen = strlen(saved_plain[idx+i]);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, saved_plain[idx+i], len);\n\t\t\tSHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\t\tSHA1_Final(tmpBuf, &ctx);\n\t\t\tmemcpy(&keys[i<<6], saved_plain[idx+i], len);\n\t\t\tmemcpy(&keys[(i<<6)+len], tmpBuf, 20);\n\t\t\tkeys[(i<<6)+len+20] = 0x80;\n\t\t\toffs[i] = len;\n\t\t\tlen += 20;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tkeys[(i<<6)+60] = (len<<3)&0xff;\n\t\t\tkeys[(i<<6)+61] = (len>>5);\n#else\n\t\t\tkeys[(i<<6)+62] = (len>>5);\n\t\t\tkeys[(i<<6)+63] = (len<<3)&0xff;\n\n\t\t}\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tuint32_t k;\n\t\t\tSIMDSHA1body(keys, crypt32, NULL, SSEi_FLAT_IN);\n\t\t\tfor (k = 0; k < NBKEYS1; ++k) {\n\t\t\t\tuint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (k&(SIMD_COEF_32-1))];\n\t\t\t\tuint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);\n\t\t\t\tfor (j = 0; j < 5; ++j) {\n\t\t\t\t\t// likely location for BE porting\n#if ARCH_ALLOWS_UNALIGNED\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tIcp32[j] = JOHNSWAP(*pcrypt);\n #else\n\t\t\t\t\tIcp32[j] = *pcrypt;\n \n#else\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tuint32_t tmp = JOHNSWAP(*pcrypt);\n\t\t\t\t\tmemcpy(&Icp32[j], &tmp, 4);\n #else\n\t\t\t\t\tmemcpy(&Icp32[j], pcrypt, 4);\n \n\n\t\t\t\t\tpcrypt += SIMD_COEF_32;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// now marshal into crypt_out;\n\t\tfor (i = 0; i < NBKEYS1; ++i) {\n\t\t\tuint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);\n\t\t\tuint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (i&(SIMD_COEF_32-1))];\n\t\t\t// we only 
want 16 bytes, not 20\n\t\t\tfor (j = 0; j < 4; ++j) {\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\tOptr32[j] = JOHNSWAP(*Iptr32);\n#else\n\t\t\t\tOptr32[j] = *Iptr32;\n\n\t\t\t\tIptr32 += SIMD_COEF_32;\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapH_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)", "context_chars": 100, "text": "COEF_32;\n\t\t\t}\n\t\t}\n#endif\n\t}\n}\nstatic void crypt_all_256(int count) {\n\tint idx;\n#if defined(_OPENMP)\nfor (idx = 0; idx < count; idx += NBKEYS256) {\n\t\tSHA256_CTX ctx;\n\t\tuint32_t i;\n\n#if !defined (SIMD_COEF_32)\n\t\tuint32_t len = strlen(saved_plain[idx]);\n\t\tunsigned char tmp[PLAINTEXT_LENGTH+SHA256_BINARY_SIZE], *cp=&tmp[len];\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, saved_plain[idx], len);\n\t\tSHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\tstrcpy((char*)tmp, saved_plain[idx]);\n\t\tlen += SHA256_BINARY_SIZE;\n\t\tSHA256_Final(cp, &ctx);\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, tmp, len);\n\t\t\tSHA256_Final(cp, &ctx);\n\t\t}\n\t\tmemcpy(crypt_key[idx], cp, BINARY_SIZE);\n#else\n\t\tunsigned char _IBuf[64*NBKEYS256+MEM_ALIGN_SIMD], *keys, tmpBuf[32], _OBuf[32*NBKEYS256+MEM_ALIGN_SIMD], *crypt;\n\t\tuint32_t j, *crypt32, offs[NBKEYS256], len;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);\n\t\tcrypt32 = (uint32_t*)crypt;\n\t\tmemset(keys, 0, 64*NBKEYS256);\n\n\t\tfor (i = 0; i < NBKEYS256; ++i) {\n\t\t\tlen = strlen(saved_plain[idx+i]);\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, saved_plain[idx+i], len);\n\t\t\tSHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\t\tSHA256_Final(tmpBuf, &ctx);\n\t\t\tmemcpy(&keys[i<<6], saved_plain[idx+i], len);\n\t\t\tmemcpy(&keys[(i<<6)+len], tmpBuf, 32);\n\t\t\tkeys[(i<<6)+len+32] = 0x80;\n\t\t\toffs[i] = len;\n\t\t\tlen += 32;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tkeys[(i<<6)+60] = (len<<3)&0xff;\n\t\t\tkeys[(i<<6)+61] = (len>>5);\n#else\n\t\t\tkeys[(i<<6)+62] = (len>>5);\n\t\t\tkeys[(i<<6)+63] = (len<<3)&0xff;\n\n\t\t}\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tuint32_t k;\n\t\t\tSIMDSHA256body(keys, crypt32, NULL, SSEi_FLAT_IN);\n\t\t\tfor (k = 0; k < NBKEYS256; ++k) {\n\t\t\t\tuint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (k&(SIMD_COEF_32-1))];\n\t\t\t\tuint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);\n\t\t\t\tfor (j = 0; j < 8; ++j) {\n#if ARCH_ALLOWS_UNALIGNED\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tIcp32[j] = JOHNSWAP(*pcrypt);\n #else\n\t\t\t\t\tIcp32[j] = *pcrypt;\n \n#else\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tuint32_t tmp = JOHNSWAP(*pcrypt);\n\t\t\t\t\tmemcpy(&Icp32[j], &tmp, 4);\n #else\n\t\t\t\t\tmemcpy(&Icp32[j], pcrypt, 4);\n \n\n\t\t\t\t\tpcrypt += SIMD_COEF_32;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// now marshal into crypt_out;\n\t\tfor (i = 0; i < NBKEYS256; ++i) {\n\t\t\tuint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);\n\t\t\tuint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (i&(SIMD_COEF_32-1))];\n\t\t\t// we only want 16 bytes, not 32\n\t\t\tfor (j = 0; j < 4; ++j) {\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\tOptr32[j] = JOHNSWAP(*Iptr32);\n#else\n\t\t\t\tOptr32[j] = *Iptr32;\n\n\t\t\t\tIptr32 += 
SIMD_COEF_32;\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapH_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)", "context_chars": 100, "text": "COEF_32;\n\t\t\t}\n\t\t}\n#endif\n\t}\n}\nstatic void crypt_all_384(int count) {\n\tint idx;\n#if defined(_OPENMP)\nfor (idx = 0; idx < count; idx+=NBKEYS512) {\n\t\tSHA512_CTX ctx;\n\t\tuint32_t i;\n\n#if !defined SIMD_COEF_64\n\t\tuint32_t len = strlen(saved_plain[idx]);\n\t\tunsigned char tmp[PLAINTEXT_LENGTH+SHA384_BINARY_SIZE], *cp=&tmp[len];\n\t\tSHA384_Init(&ctx);\n\t\tSHA384_Update(&ctx, saved_plain[idx], len);\n\t\tSHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\tstrcpy((char*)tmp, saved_plain[idx]);\n\t\tlen += SHA384_BINARY_SIZE;\n\t\tSHA384_Final(cp, &ctx);\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tSHA384_Init(&ctx);\n\t\t\tSHA384_Update(&ctx, tmp, len);\n\t\t\tSHA384_Final(cp, &ctx);\n\t\t}\n\t\tmemcpy(crypt_key[idx], cp, BINARY_SIZE);\n#else\n\t\tunsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;\n\t\tuint64_t j, *crypt64, offs[NBKEYS512];\n\t\tuint32_t len;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);\n\t\tcrypt64 = (uint64_t*)crypt;\n\t\tmemset(keys, 0, 128*NBKEYS512);\n\n\t\tfor (i = 0; i < NBKEYS512; ++i) {\n\t\t\tlen = strlen(saved_plain[idx+i]);\n\t\t\tSHA384_Init(&ctx);\n\t\t\tSHA384_Update(&ctx, saved_plain[idx+i], len);\n\t\t\tSHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\t\tSHA384_Final(tmpBuf, &ctx);\n\t\t\tmemcpy(&keys[i<<7], saved_plain[idx+i], len);\n\t\t\tmemcpy(&keys[(i<<7)+len], tmpBuf, 48);\n\t\t\tkeys[(i<<7)+len+48] = 0x80;\n\t\t\toffs[i] = len;\n\t\t\tlen += 48;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tkeys[(i<<7)+120] = (len<<3)&0xff;\n\t\t\tkeys[(i<<7)+121] = (len>>5);\n#else\n\t\t\tkeys[(i<<7)+126] = (len>>5);\n\t\t\tkeys[(i<<7)+127] = (len<<3)&0xff;\n\n\t\t}\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tuint32_t k;\n\t\t\tSIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN|SSEi_CRYPT_SHA384);\n\t\t\tfor (k = 0; k < NBKEYS512; ++k) {\n\t\t\t\tuint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];\n\t\t\t\tuint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);\n\t\t\t\tfor (j = 0; j < 6; ++j) {\n#if ARCH_ALLOWS_UNALIGNED\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tIcp64[j] = JOHNSWAP64(*pcrypt);\n #else\n\t\t\t\t\tIcp64[j] = *pcrypt;\n \n#else\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tuint64_t tmp = JOHNSWAP64(*pcrypt);\n\t\t\t\t\tmemcpy(&Icp64[j], &tmp, 8);\n #else\n\t\t\t\t\tmemcpy(&Icp64[j], pcrypt, 8);\n \n\n\t\t\t\t\tpcrypt += SIMD_COEF_64;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// now marshal into crypt_out;\n\t\tfor (i = 0; i < NBKEYS512; ++i) {\n\t\t\tuint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);\n\t\t\tuint64_t *Iptr64 = &crypt64[ ((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];\n\t\t\t// we only want 16 bytes, not 48\n\t\t\tfor (j = 0; j < 2; ++j) {\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\tOptr64[j] = JOHNSWAP64(*Iptr64);\n#else\n\t\t\t\tOptr64[j] = *Iptr64;\n\n\t\t\t\tIptr64 += SIMD_COEF_64;\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapH_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)", "context_chars": 100, "text": "COEF_64;\n\t\t\t}\n\t\t}\n#endif\n\t}\n}\nstatic void crypt_all_512(int count) {\n\tint idx;\n#if defined(_OPENMP)\nfor (idx = 0; idx < count; idx+=NBKEYS512) {\n\t\tSHA512_CTX ctx;\n\t\tuint32_t i;\n#if !defined SIMD_COEF_64\n\t\tuint32_t len = strlen(saved_plain[idx]);\n\t\tunsigned char tmp[PLAINTEXT_LENGTH+SHA512_BINARY_SIZE], *cp=&tmp[len];\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, saved_plain[idx], len);\n\t\tSHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\tstrcpy((char*)tmp, saved_plain[idx]);\n\t\tlen += SHA512_BINARY_SIZE;\n\t\tSHA512_Final(cp, &ctx);\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, tmp, len);\n\t\t\tSHA512_Final(cp, &ctx);\n\t\t}\n\t\tmemcpy(crypt_key[idx], cp, BINARY_SIZE);\n#else\n\t\tunsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;\n\t\tuint64_t j, *crypt64, offs[NBKEYS512];\n\t\tuint32_t len;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);\n\t\tcrypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);\n\t\tcrypt64 = (uint64_t*)crypt;\n\t\tmemset(keys, 0, 128*NBKEYS512);\n\n\t\tfor (i = 0; i < NBKEYS512; ++i) {\n\t\t\tlen = strlen(saved_plain[idx+i]);\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, saved_plain[idx+i], len);\n\t\t\tSHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);\n\t\t\tSHA512_Final(tmpBuf, &ctx);\n\t\t\tmemcpy(&keys[i<<7], saved_plain[idx+i], len);\n\t\t\tmemcpy(&keys[(i<<7)+len], tmpBuf, 64);\n\t\t\tkeys[(i<<7)+len+64] = 0x80;\n\t\t\toffs[i] = len;\n\t\t\tlen += 64;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tkeys[(i<<7)+120] = (len<<3)&0xff;\n\t\t\tkeys[(i<<7)+121] = (len>>5);\n#else\n\t\t\tkeys[(i<<7)+126] = (len>>5);\n\t\t\tkeys[(i<<7)+127] = (len<<3)&0xff;\n\n\t\t}\n\t\tfor (i = 1; i < sapH_cur_salt->iter; ++i) {\n\t\t\tuint32_t k;\n\t\t\tSIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN);\n\t\t\tfor (k = 0; k < NBKEYS512; ++k) {\n\t\t\t\tuint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];\n\t\t\t\tuint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);\n\t\t\t\tfor (j = 0; j < 8; ++j) {\n#if ARCH_ALLOWS_UNALIGNED\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tIcp64[j] = JOHNSWAP64(*pcrypt);\n #else\n\t\t\t\t\tIcp64[j] = *pcrypt;\n \n#else\n #if ARCH_LITTLE_ENDIAN\n\t\t\t\t\tuint64_t tmp = JOHNSWAP64(*pcrypt);\n\t\t\t\t\tmemcpy(&Icp64[j], &tmp, 8);\n #else\n\t\t\t\t\tmemcpy(&Icp64[j], pcrypt, 8);\n \n\n\t\t\t\t\tpcrypt += SIMD_COEF_64;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// now marshal into crypt_out;\n\t\tfor (i = 0; i < NBKEYS512; ++i) {\n\t\t\tuint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);\n\t\t\tuint64_t *Iptr64 = &crypt64[((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];\n\t\t\t// we only want 16 bytes, not 64\n\t\t\tfor (j = 0; j < 2; ++j) {\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\tOptr64[j] = JOHNSWAP64(*Iptr64);\n#else\n\t\t\t\tOptr64[j] = *Iptr64;\n\n\t\t\t\tIptr64 += SIMD_COEF_64;\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mozilla_ng_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": 
"pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA_CTX ctx, ctxi, ctxo;\n\t\tint i;\n\t\tunion {\n\t\t\tunsigned char uc[64];\n\t\t\tuint32_t ui[64/4];\n\t\t} pad;\n\t\tunsigned char buffer[20];\n\t\tunsigned char tk[20];\n\t\tunsigned char key[40];\n\t\tDES_cblock ivec;\n\t\tDES_key_schedule ks1, ks2, ks3;\n\n\t\t// HP = SHA1(global-salt||password)\n\t\t// Copy already calculated partial hash data\n\t\tmemcpy(&ctx, &cur_salt->pctx, sizeof(SHA_CTX));\n\t\tSHA1_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA1_Final(buffer, &ctx);\n\n\t\t// CHP = SHA1(HP||entry-salt) // entry-salt (ES) is local_salt\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, buffer, 20);\n\t\tSHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);\n\t\tSHA1_Final(buffer, &ctx);\n\n\t\t// Step 0 for all hmac, store off the first half (the key is the same for all 3)\n\t\t// this will avoid having to setup the ipad/opad 2 times, and also avoids 4 SHA calls\n\t\t// reducing the hmac calls from 12 SHA limbs, down to 8 and ipad/opad loads from 3\n\t\t// down to 1. It adds 4 CTX memcpy's, but that is a very fair trade off.\n\t\tSHA1_Init(&ctxi);\n\t\tSHA1_Init(&ctxo);\n\t\tmemset(pad.uc, 0x36, 64);\n\t\tfor (i = 0; i < 20; ++i)\n\t\t\tpad.uc[i] ^= buffer[i];\n\t\tSHA1_Update(&ctxi, pad.uc, 64);\n\t\tfor (i = 0; i < 64/4; ++i)\n\t\t\tpad.ui[i] ^= 0x36363636^0x5c5c5c5c;\n\t\tSHA1_Update(&ctxo, pad.uc, 64);\n\n\t\t// k1 = HMAC(PES||ES) // use CHP as the key, PES is ES which is zero padded to length 20\n\t\t// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key\n\t\tmemcpy(&ctx, &ctxi, sizeof(ctx));\n\t\tSHA1_Update(&ctx, cur_salt->local_salt, 20);\n\t\tSHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);\n\t\tSHA1_Final(buffer, &ctx);\n\t\tmemcpy(&ctx, &ctxo, sizeof(ctx));\n\t\tSHA1_Update(&ctx, buffer, 20);\n\t\tSHA1_Final(key, &ctx);\n\n\t\t// tk = HMAC(PES) // use CHP as the key\n\t\t// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key\n\t\tmemcpy(&ctx, &ctxi, sizeof(ctx));\n\t\tSHA1_Update(&ctx, cur_salt->local_salt, 20);\n\t\tSHA1_Final(buffer, &ctx);\n\t\tmemcpy(&ctx, &ctxo, sizeof(ctx));\n\t\tSHA1_Update(&ctx, buffer, 20);\n\t\tSHA1_Final(tk, &ctx);\n\n\t\t// k2 = HMAC(tk||ES) // use CHP as the key\n\t\t// NOTE, ctxi and ctxo are no longer needed after this hmac, so we simply use them\n\t\tSHA1_Update(&ctxi, tk, 20);\n\t\tSHA1_Update(&ctxi, cur_salt->local_salt, cur_salt->local_salt_length);\n\t\tSHA1_Final(buffer, &ctxi);\n\t\tSHA1_Update(&ctxo, buffer, 20);\n\t\tSHA1_Final(key+20, &ctxo);\n\n\t\t// k = k1||k2 // encrypt \"password-check\" string using this key\n\t\tDES_set_key_unchecked((DES_cblock *) key, &ks1);\n\t\tDES_set_key_unchecked((DES_cblock *) (key+8), &ks2);\n\t\tDES_set_key_unchecked((DES_cblock *) (key+16), &ks3);\n\t\tmemcpy(ivec, key + 32, 8); // last 8 bytes!\n\t\t// PKCS#5 padding (standard block padding)\n\t\tDES_ede3_cbc_encrypt((unsigned char*)\"password-check\\x02\\x02\", (unsigned char*)crypt_out[index], 16, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, precheck)", "context_chars": 100, "text": "\n\tvolatile\n#endif\n\tint precheck = salt && !salt->bitmap;\n#endif\n\n#ifdef _OPENMP\n#ifdef 
vtestallones\n#else\n#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)\n\n\n\tfor_each_t(n) {\n#if DES_BS_EXPAND\n\t\tDES_bs_vector *k;\n#else\n\t\tARCH_WORD **k;\n\n\t\tint iterations, rounds_and_swapped;\n#if DES_BS_VECTOR_LOOPS\n\t\tint depth;\n\n\n\t\tif (DES_bs_all.keys_changed)\n\t\t\tgoto finalize_keys;\n\nbody:\n#if DES_bs_mt\n\t\tDES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);\n\n\n\t\t{\n\t\t\tvtype zero = vzero;\n\t\t\tDES_bs_clear_block\n\t\t}\n\n#if DES_BS_EXPAND\n\t\tk = DES_bs_all.KS.v;\n#else\n\t\tk = DES_bs_all.KS.p;\n\n\t\trounds_and_swapped = 8;\n\t\titerations = 25;\n\nstart:\n\t\tfor_each_depth()\n\t\ts1(x(0), x(1), x(2), x(3), x(4), x(5),\n\t\t\tz(40), z(48), z(54), z(62));\n\t\tfor_each_depth()\n\t\ts2(x(6), x(7), x(8), x(9), x(10), x(11),\n\t\t\tz(44), z(59), z(33), z(49));\n\t\tfor_each_depth()\n\t\ts3(y(7, 12), y(8, 13), y(9, 14),\n\t\t\ty(10, 15), y(11, 16), y(12, 17),\n\t\t\tz(55), z(47), z(61), z(37));\n\t\tfor_each_depth()\n\t\ts4(y(11, 18), y(12, 19), y(13, 20),\n\t\t\ty(14, 21), y(15, 22), y(16, 23),\n\t\t\tz(57), z(51), z(41), z(32));\n\t\tfor_each_depth()\n\t\ts5(x(24), x(25), x(26), x(27), x(28), x(29),\n\t\t\tz(39), z(45), z(56), z(34));\n\t\tfor_each_depth()\n\t\ts6(x(30), x(31), x(32), x(33), x(34), x(35),\n\t\t\tz(35), z(60), z(42), z(50));\n\t\tfor_each_depth()\n\t\ts7(y(23, 36), y(24, 37), y(25, 38),\n\t\t\ty(26, 39), y(27, 40), y(28, 41),\n\t\t\tz(63), z(43), z(53), z(38));\n\t\tfor_each_depth()\n\t\ts8(y(27, 42), y(28, 43), y(29, 44),\n\t\t\ty(30, 45), y(31, 46), y(0, 47),\n\t\t\tz(36), z(58), z(46), z(52));\n\n\t\tif (rounds_and_swapped == 0x100) goto next;\n\nswap:\n\t\tfor_each_depth()\n\t\ts1(x(48), x(49), x(50), x(51), x(52), x(53),\n\t\t\tz(8), z(16), z(22), z(30));\n\t\tfor_each_depth()\n\t\ts2(x(54), x(55), x(56), x(57), x(58), x(59),\n\t\t\tz(12), z(27), z(1), z(17));\n\t\tfor_each_depth()\n\t\ts3(y(39, 60), y(40, 61), y(41, 62),\n\t\t\ty(42, 63), y(43, 64), y(44, 65),\n\t\t\tz(23), z(15), z(29), z(5));\n\t\tfor_each_depth()\n\t\ts4(y(43, 66), y(44, 67), y(45, 68),\n\t\t\ty(46, 69), y(47, 70), y(48, 71),\n\t\t\tz(25), z(19), z(9), z(0));\n\t\tfor_each_depth()\n\t\ts5(x(72), x(73), x(74), x(75), x(76), x(77),\n\t\t\tz(7), z(13), z(24), z(2));\n\t\tfor_each_depth()\n\t\ts6(x(78), x(79), x(80), x(81), x(82), x(83),\n\t\t\tz(3), z(28), z(10), z(18));\n\t\tfor_each_depth()\n\t\ts7(y(55, 84), y(56, 85), y(57, 86),\n\t\t\ty(58, 87), y(59, 88), y(60, 89),\n\t\t\tz(31), z(11), z(21), z(6));\n\t\tfor_each_depth()\n\t\ts8(y(59, 90), y(60, 91), y(61, 92),\n\t\t\ty(62, 93), y(63, 94), y(32, 95),\n\t\t\tz(4), z(26), z(14), z(20));\n\n\t\tk += 96;\n\n\t\tif (--rounds_and_swapped) goto start;\n\t\tk -= (0x300 + 48);\n\t\trounds_and_swapped = 0x108;\n\t\tif (--iterations) goto swap;\n\n#ifdef vtestallones\n\t\tif (precheck) {\n\t\t\tstruct db_password *pw = salt->list;\n\t\t\tdo {\n\t\t\t\tuint32_t binary = *(uint32_t *)pw->binary;\n\t\t\t\tfor_each_depth() {\n\t\t\t\t\tuint32_t u = binary;\n\t\t\t\t\tvtype mask = *z(26);\n\t\t\t\t\tif (u & (1 << 26))\n\t\t\t\t\t\tvnot(mask, mask);\n\t\t\t\t\tint bit = 0;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tvtype v = *z(bit);\n\t\t\t\t\t\tif (u & 1)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 1);\n\t\t\t\t\t\tif (u & 2)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 2);\n\t\t\t\t\t\tif (u & 
4)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 3);\n\t\t\t\t\t\tif (u & 8)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tu >>= 4;\n\t\t\t\t\t} while ((bit += 4) <= 28);\n\n#if DES_bs_mt\n\t\t\t\t\tprecheck = 0;\n\t\t\t\t\tgoto next_batch;\n#else\n\t\t\t\t\treturn keys_count;\n\n\nnext_depth:\n\t\t\t\t\t;\n\t\t\t\t}\n\t\t\t} while ((pw = pw->next));\n\t\t}\n#if DES_bs_mt\nnext_batch:\n\n\n\n#if DES_bs_mt\n\t\tcontinue;\n#else\n\t\treturn keys_count;\n\n\nnext:\n\t\tk -= (0x300 - 48);\n\t\trounds_and_swapped = 8;\n\t\titerations--;\n\t\tgoto start;\n\nfinalize_keys:\n\t\tDES_bs_all.keys_changed = 0;\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys(t);\n#else\n\t\tDES_bs_finalize_keys();\n\n\t\tgoto body;\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, precheck)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)", "context_chars": 100, "text": "nes\n#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, precheck)\n#else\nfor_each_t(n) {\n#if DES_BS_EXPAND\n\t\tDES_bs_vector *k;\n#else\n\t\tARCH_WORD **k;\n\n\t\tint iterations, rounds_and_swapped;\n#if DES_BS_VECTOR_LOOPS\n\t\tint depth;\n\n\n\t\tif (DES_bs_all.keys_changed)\n\t\t\tgoto finalize_keys;\n\nbody:\n#if DES_bs_mt\n\t\tDES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);\n\n\n\t\t{\n\t\t\tvtype zero = vzero;\n\t\t\tDES_bs_clear_block\n\t\t}\n\n#if DES_BS_EXPAND\n\t\tk = DES_bs_all.KS.v;\n#else\n\t\tk = DES_bs_all.KS.p;\n\n\t\trounds_and_swapped = 8;\n\t\titerations = 25;\n\nstart:\n\t\tfor_each_depth()\n\t\ts1(x(0), x(1), x(2), x(3), x(4), x(5),\n\t\t\tz(40), z(48), z(54), z(62));\n\t\tfor_each_depth()\n\t\ts2(x(6), x(7), x(8), x(9), x(10), x(11),\n\t\t\tz(44), z(59), z(33), z(49));\n\t\tfor_each_depth()\n\t\ts3(y(7, 12), y(8, 13), y(9, 14),\n\t\t\ty(10, 15), y(11, 16), y(12, 17),\n\t\t\tz(55), z(47), z(61), z(37));\n\t\tfor_each_depth()\n\t\ts4(y(11, 18), y(12, 19), y(13, 20),\n\t\t\ty(14, 21), y(15, 22), y(16, 23),\n\t\t\tz(57), z(51), z(41), z(32));\n\t\tfor_each_depth()\n\t\ts5(x(24), x(25), x(26), x(27), x(28), x(29),\n\t\t\tz(39), z(45), z(56), z(34));\n\t\tfor_each_depth()\n\t\ts6(x(30), x(31), x(32), x(33), x(34), x(35),\n\t\t\tz(35), z(60), z(42), z(50));\n\t\tfor_each_depth()\n\t\ts7(y(23, 36), y(24, 37), y(25, 38),\n\t\t\ty(26, 39), y(27, 40), y(28, 41),\n\t\t\tz(63), z(43), z(53), z(38));\n\t\tfor_each_depth()\n\t\ts8(y(27, 42), y(28, 43), y(29, 44),\n\t\t\ty(30, 45), y(31, 46), y(0, 47),\n\t\t\tz(36), z(58), z(46), z(52));\n\n\t\tif (rounds_and_swapped == 0x100) goto next;\n\nswap:\n\t\tfor_each_depth()\n\t\ts1(x(48), x(49), x(50), x(51), x(52), x(53),\n\t\t\tz(8), z(16), z(22), z(30));\n\t\tfor_each_depth()\n\t\ts2(x(54), x(55), x(56), x(57), x(58), x(59),\n\t\t\tz(12), z(27), z(1), z(17));\n\t\tfor_each_depth()\n\t\ts3(y(39, 60), y(40, 61), y(41, 62),\n\t\t\ty(42, 63), y(43, 64), y(44, 65),\n\t\t\tz(23), z(15), z(29), z(5));\n\t\tfor_each_depth()\n\t\ts4(y(43, 66), y(44, 67), y(45, 68),\n\t\t\ty(46, 69), y(47, 70), y(48, 71),\n\t\t\tz(25), z(19), z(9), z(0));\n\t\tfor_each_depth()\n\t\ts5(x(72), x(73), x(74), x(75), x(76), x(77),\n\t\t\tz(7), z(13), z(24), z(2));\n\t\tfor_each_depth()\n\t\ts6(x(78), x(79), x(80), x(81), x(82), 
x(83),\n\t\t\tz(3), z(28), z(10), z(18));\n\t\tfor_each_depth()\n\t\ts7(y(55, 84), y(56, 85), y(57, 86),\n\t\t\ty(58, 87), y(59, 88), y(60, 89),\n\t\t\tz(31), z(11), z(21), z(6));\n\t\tfor_each_depth()\n\t\ts8(y(59, 90), y(60, 91), y(61, 92),\n\t\t\ty(62, 93), y(63, 94), y(32, 95),\n\t\t\tz(4), z(26), z(14), z(20));\n\n\t\tk += 96;\n\n\t\tif (--rounds_and_swapped) goto start;\n\t\tk -= (0x300 + 48);\n\t\trounds_and_swapped = 0x108;\n\t\tif (--iterations) goto swap;\n\n#ifdef vtestallones\n\t\tif (precheck) {\n\t\t\tstruct db_password *pw = salt->list;\n\t\t\tdo {\n\t\t\t\tuint32_t binary = *(uint32_t *)pw->binary;\n\t\t\t\tfor_each_depth() {\n\t\t\t\t\tuint32_t u = binary;\n\t\t\t\t\tvtype mask = *z(26);\n\t\t\t\t\tif (u & (1 << 26))\n\t\t\t\t\t\tvnot(mask, mask);\n\t\t\t\t\tint bit = 0;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tvtype v = *z(bit);\n\t\t\t\t\t\tif (u & 1)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 1);\n\t\t\t\t\t\tif (u & 2)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 2);\n\t\t\t\t\t\tif (u & 4)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 3);\n\t\t\t\t\t\tif (u & 8)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tu >>= 4;\n\t\t\t\t\t} while ((bit += 4) <= 28);\n\n#if DES_bs_mt\n\t\t\t\t\tprecheck = 0;\n\t\t\t\t\tgoto next_batch;\n#else\n\t\t\t\t\treturn keys_count;\n\n\nnext_depth:\n\t\t\t\t\t;\n\t\t\t\t}\n\t\t\t} while ((pw = pw->next));\n\t\t}\n#if DES_bs_mt\nnext_batch:\n\n\n\n#if DES_bs_mt\n\t\tcontinue;\n#else\n\t\treturn keys_count;\n\n\nnext:\n\t\tk -= (0x300 - 48);\n\t\trounds_and_swapped = 8;\n\t\titerations--;\n\t\tgoto start;\n\nfinalize_keys:\n\t\tDES_bs_all.keys_changed = 0;\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys(t);\n#else\n\t\tDES_bs_finalize_keys();\n\n\t\tgoto body;\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)", "context_chars": 100, "text": "\n#if DES_bs_mt\n\tint t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;\n#endif\n\n#ifdef _OPENMP\nfor_each_t(n) {\n#if DES_BS_EXPAND\n\t\tDES_bs_vector *k;\n#else\n\t\tARCH_WORD **k;\n\n\t\tint iterations, rounds_and_swapped;\n#if DES_BS_VECTOR_LOOPS\n\t\tint depth;\n\n\n\t\tif (DES_bs_all.keys_changed)\n\t\t\tgoto finalize_keys;\n\nbody:\n#if DES_bs_mt\n\t\tDES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);\n\n\n\t\t{\n\t\t\tvtype zero = vzero;\n\t\t\tDES_bs_clear_block\n\t\t}\n\n#if DES_BS_EXPAND\n\t\tk = DES_bs_all.KS.v;\n#else\n\t\tk = DES_bs_all.KS.p;\n\n\t\trounds_and_swapped = 8;\n\t\titerations = count;\n\nstart:\n\t\tfor_each_depth()\n\t\ts1(x(0), x(1), x(2), x(3), x(4), x(5),\n\t\t\tz(40), z(48), z(54), z(62));\n\t\tfor_each_depth()\n\t\ts2(x(6), x(7), x(8), x(9), x(10), x(11),\n\t\t\tz(44), z(59), z(33), z(49));\n\t\tfor_each_depth()\n\t\ts3(x(12), x(13), x(14), x(15), x(16), x(17),\n\t\t\tz(55), z(47), z(61), z(37));\n\t\tfor_each_depth()\n\t\ts4(x(18), x(19), x(20), x(21), x(22), x(23),\n\t\t\tz(57), z(51), z(41), z(32));\n\t\tfor_each_depth()\n\t\ts5(x(24), x(25), x(26), x(27), x(28), 
x(29),\n\t\t\tz(39), z(45), z(56), z(34));\n\t\tfor_each_depth()\n\t\ts6(x(30), x(31), x(32), x(33), x(34), x(35),\n\t\t\tz(35), z(60), z(42), z(50));\n\t\tfor_each_depth()\n\t\ts7(x(36), x(37), x(38), x(39), x(40), x(41),\n\t\t\tz(63), z(43), z(53), z(38));\n\t\tfor_each_depth()\n\t\ts8(x(42), x(43), x(44), x(45), x(46), x(47),\n\t\t\tz(36), z(58), z(46), z(52));\n\n\t\tif (rounds_and_swapped == 0x100) goto next;\n\nswap:\n\t\tfor_each_depth()\n\t\ts1(x(48), x(49), x(50), x(51), x(52), x(53),\n\t\t\tz(8), z(16), z(22), z(30));\n\t\tfor_each_depth()\n\t\ts2(x(54), x(55), x(56), x(57), x(58), x(59),\n\t\t\tz(12), z(27), z(1), z(17));\n\t\tfor_each_depth()\n\t\ts3(x(60), x(61), x(62), x(63), x(64), x(65),\n\t\t\tz(23), z(15), z(29), z(5));\n\t\tfor_each_depth()\n\t\ts4(x(66), x(67), x(68), x(69), x(70), x(71),\n\t\t\tz(25), z(19), z(9), z(0));\n\t\tfor_each_depth()\n\t\ts5(x(72), x(73), x(74), x(75), x(76), x(77),\n\t\t\tz(7), z(13), z(24), z(2));\n\t\tfor_each_depth()\n\t\ts6(x(78), x(79), x(80), x(81), x(82), x(83),\n\t\t\tz(3), z(28), z(10), z(18));\n\t\tfor_each_depth()\n\t\ts7(x(84), x(85), x(86), x(87), x(88), x(89),\n\t\t\tz(31), z(11), z(21), z(6));\n\t\tfor_each_depth()\n\t\ts8(x(90), x(91), x(92), x(93), x(94), x(95),\n\t\t\tz(4), z(26), z(14), z(20));\n\n\t\tk += 96;\n\n\t\tif (--rounds_and_swapped) goto start;\n\t\tk -= (0x300 + 48);\n\t\trounds_and_swapped = 0x108;\n\t\tif (--iterations) goto swap;\n#if DES_bs_mt\n\t\tcontinue;\n#else\n\t\treturn;\n\n\nnext:\n\t\tk -= (0x300 - 48);\n\t\trounds_and_swapped = 8;\n\t\tif (--iterations) goto start;\n#if DES_bs_mt\n\t\tcontinue;\n#else\n\t\treturn;\n\n\nfinalize_keys:\n\t\tDES_bs_all.keys_changed = 0;\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys(t);\n#else\n\t\tDES_bs_finalize_keys();\n\n\t\tgoto body;\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, precheck)", "context_chars": 100, "text": "\n\tvolatile\n#endif\n\tint precheck = salt && !salt->bitmap;\n#endif\n\n#ifdef _OPENMP\n#ifdef vtestallones\n#else\n#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)\n\n\n\tfor_each_t(n) {\n\t\tARCH_WORD **k;\n\t\tint rounds;\n#if DES_BS_VECTOR_LOOPS\n\t\tint depth;\n\n\n\t\t{\n\t\t\tvtype z = vzero, o = vones;\n\t\t\tDES_bs_set_block_8(0, z, z, z, z, z, z, z, z);\n\t\t\tDES_bs_set_block_8(8, o, o, o, z, o, z, z, z);\n\t\t\tDES_bs_set_block_8(16, z, z, z, z, z, z, z, o);\n\t\t\tDES_bs_set_block_8(24, z, z, o, z, z, o, o, o);\n\t\t\tDES_bs_set_block_8(32, z, z, z, o, z, o, o, o);\n\t\t\tDES_bs_set_block_8(40, z, z, z, z, z, o, z, z);\n\t\t\tDES_bs_set_block_8(48, o, o, z, z, z, z, o, z);\n\t\t\tDES_bs_set_block_8(56, o, z, o, z, o, o, o, o);\n\t\t}\n\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys_LM(t);\n#else\n\t\tDES_bs_finalize_keys_LM();\n\n\n\t\tk = DES_bs_all.KS.p;\n\t\trounds = 8;\n\n\t\tdo {\n\t\t\tfor_each_depth()\n\t\t\ts1(y(31, 0), y(0, 1), y(1, 2),\n\t\t\t\ty(2, 3), y(3, 4), y(4, 5),\n\t\t\t\tz(40), z(48), z(54), z(62));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(3, 6), y(4, 7), y(5, 8),\n\t\t\t\ty(6, 9), y(7, 10), y(8, 11),\n\t\t\t\tz(44), z(59), z(33), z(49));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(7, 12), y(8, 13), y(9, 14),\n\t\t\t\ty(10, 15), y(11, 16), y(12, 17),\n\t\t\t\tz(55), z(47), z(61), z(37));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(11, 18), y(12, 19), y(13, 
20),\n\t\t\t\ty(14, 21), y(15, 22), y(16, 23),\n\t\t\t\tz(57), z(51), z(41), z(32));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(15, 24), y(16, 25), y(17, 26),\n\t\t\t\ty(18, 27), y(19, 28), y(20, 29),\n\t\t\t\tz(39), z(45), z(56), z(34));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(19, 30), y(20, 31), y(21, 32),\n\t\t\t\ty(22, 33), y(23, 34), y(24, 35),\n\t\t\t\tz(35), z(60), z(42), z(50));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(23, 36), y(24, 37), y(25, 38),\n\t\t\t\ty(26, 39), y(27, 40), y(28, 41),\n\t\t\t\tz(63), z(43), z(53), z(38));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(27, 42), y(28, 43), y(29, 44),\n\t\t\t\ty(30, 45), y(31, 46), y(0, 47),\n\t\t\t\tz(36), z(58), z(46), z(52));\n\n\t\t\tfor_each_depth()\n\t\t\ts1(y(63, 48), y(32, 49), y(33, 50),\n\t\t\t\ty(34, 51), y(35, 52), y(36, 53),\n\t\t\t\tz(8), z(16), z(22), z(30));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(35, 54), y(36, 55), y(37, 56),\n\t\t\t\ty(38, 57), y(39, 58), y(40, 59),\n\t\t\t\tz(12), z(27), z(1), z(17));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(39, 60), y(40, 61), y(41, 62),\n\t\t\t\ty(42, 63), y(43, 64), y(44, 65),\n\t\t\t\tz(23), z(15), z(29), z(5));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(43, 66), y(44, 67), y(45, 68),\n\t\t\t\ty(46, 69), y(47, 70), y(48, 71),\n\t\t\t\tz(25), z(19), z(9), z(0));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(47, 72), y(48, 73), y(49, 74),\n\t\t\t\ty(50, 75), y(51, 76), y(52, 77),\n\t\t\t\tz(7), z(13), z(24), z(2));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(51, 78), y(52, 79), y(53, 80),\n\t\t\t\ty(54, 81), y(55, 82), y(56, 83),\n\t\t\t\tz(3), z(28), z(10), z(18));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(55, 84), y(56, 85), y(57, 86),\n\t\t\t\ty(58, 87), y(59, 88), y(60, 89),\n\t\t\t\tz(31), z(11), z(21), z(6));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(59, 90), y(60, 91), y(61, 92),\n\t\t\t\ty(62, 93), y(63, 94), y(32, 95),\n\t\t\t\tz(4), z(26), z(14), z(20));\n\n\t\t\tk += 96;\n\t\t} while (--rounds);\n\n#ifdef vtestallones\n\t\tif (precheck) {\n\t\t\tstruct db_password *pw = salt->list;\n\t\t\tdo {\n\t\t\t\tuint32_t binary = *(uint32_t *)pw->binary;\n\t\t\t\tfor_each_depth() {\n\t\t\t\t\tuint32_t u = binary;\n\t\t\t\t\tvtype mask = *z(20);\n\t\t\t\t\tif (u & (1 << 20))\n\t\t\t\t\t\tvnot(mask, mask);\n\t\t\t\t\tvtype v = *z(14);\n\t\t\t\t\tif (u & (1 << 14))\n\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\tv = *z(26);\n\t\t\t\t\tif (u & (1 << 26))\n\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\tint bit = 0;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tv = *z(bit);\n\t\t\t\t\t\tif (u & 1)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 1);\n\t\t\t\t\t\tif (u & 2)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 2);\n\t\t\t\t\t\tif (u & 4)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 3);\n\t\t\t\t\t\tif (u & 8)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tu >>= 4;\n\t\t\t\t\t} while ((bit += 4) <= 28);\n\n#if DES_bs_mt\n\t\t\t\t\tprecheck = 0;\n\t\t\t\t\tgoto next_batch;\n#else\n\t\t\t\t\treturn keys_count;\n\n\nnext_depth:\n\t\t\t\t\t;\n\t\t\t\t}\n\t\t\t} while ((pw = pw->next));\n\t\t}\n#if DES_bs_mt\nnext_batch:\n\t\t;\n\n\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, 
precheck)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)", "context_chars": 100, "text": "nes\n#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, salt, precheck)\n#else\nfor_each_t(n) {\n\t\tARCH_WORD **k;\n\t\tint rounds;\n#if DES_BS_VECTOR_LOOPS\n\t\tint depth;\n\n\n\t\t{\n\t\t\tvtype z = vzero, o = vones;\n\t\t\tDES_bs_set_block_8(0, z, z, z, z, z, z, z, z);\n\t\t\tDES_bs_set_block_8(8, o, o, o, z, o, z, z, z);\n\t\t\tDES_bs_set_block_8(16, z, z, z, z, z, z, z, o);\n\t\t\tDES_bs_set_block_8(24, z, z, o, z, z, o, o, o);\n\t\t\tDES_bs_set_block_8(32, z, z, z, o, z, o, o, o);\n\t\t\tDES_bs_set_block_8(40, z, z, z, z, z, o, z, z);\n\t\t\tDES_bs_set_block_8(48, o, o, z, z, z, z, o, z);\n\t\t\tDES_bs_set_block_8(56, o, z, o, z, o, o, o, o);\n\t\t}\n\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys_LM(t);\n#else\n\t\tDES_bs_finalize_keys_LM();\n\n\n\t\tk = DES_bs_all.KS.p;\n\t\trounds = 8;\n\n\t\tdo {\n\t\t\tfor_each_depth()\n\t\t\ts1(y(31, 0), y(0, 1), y(1, 2),\n\t\t\t\ty(2, 3), y(3, 4), y(4, 5),\n\t\t\t\tz(40), z(48), z(54), z(62));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(3, 6), y(4, 7), y(5, 8),\n\t\t\t\ty(6, 9), y(7, 10), y(8, 11),\n\t\t\t\tz(44), z(59), z(33), z(49));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(7, 12), y(8, 13), y(9, 14),\n\t\t\t\ty(10, 15), y(11, 16), y(12, 17),\n\t\t\t\tz(55), z(47), z(61), z(37));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(11, 18), y(12, 19), y(13, 20),\n\t\t\t\ty(14, 21), y(15, 22), y(16, 23),\n\t\t\t\tz(57), z(51), z(41), z(32));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(15, 24), y(16, 25), y(17, 26),\n\t\t\t\ty(18, 27), y(19, 28), y(20, 29),\n\t\t\t\tz(39), z(45), z(56), z(34));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(19, 30), y(20, 31), y(21, 32),\n\t\t\t\ty(22, 33), y(23, 34), y(24, 35),\n\t\t\t\tz(35), z(60), z(42), z(50));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(23, 36), y(24, 37), y(25, 38),\n\t\t\t\ty(26, 39), y(27, 40), y(28, 41),\n\t\t\t\tz(63), z(43), z(53), z(38));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(27, 42), y(28, 43), y(29, 44),\n\t\t\t\ty(30, 45), y(31, 46), y(0, 47),\n\t\t\t\tz(36), z(58), z(46), z(52));\n\n\t\t\tfor_each_depth()\n\t\t\ts1(y(63, 48), y(32, 49), y(33, 50),\n\t\t\t\ty(34, 51), y(35, 52), y(36, 53),\n\t\t\t\tz(8), z(16), z(22), z(30));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(35, 54), y(36, 55), y(37, 56),\n\t\t\t\ty(38, 57), y(39, 58), y(40, 59),\n\t\t\t\tz(12), z(27), z(1), z(17));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(39, 60), y(40, 61), y(41, 62),\n\t\t\t\ty(42, 63), y(43, 64), y(44, 65),\n\t\t\t\tz(23), z(15), z(29), z(5));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(43, 66), y(44, 67), y(45, 68),\n\t\t\t\ty(46, 69), y(47, 70), y(48, 71),\n\t\t\t\tz(25), z(19), z(9), z(0));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(47, 72), y(48, 73), y(49, 74),\n\t\t\t\ty(50, 75), y(51, 76), y(52, 77),\n\t\t\t\tz(7), z(13), z(24), z(2));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(51, 78), y(52, 79), y(53, 80),\n\t\t\t\ty(54, 81), y(55, 82), y(56, 83),\n\t\t\t\tz(3), z(28), z(10), z(18));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(55, 84), y(56, 85), y(57, 86),\n\t\t\t\ty(58, 87), y(59, 88), y(60, 89),\n\t\t\t\tz(31), z(11), z(21), z(6));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(59, 90), y(60, 91), y(61, 92),\n\t\t\t\ty(62, 93), y(63, 94), y(32, 95),\n\t\t\t\tz(4), z(26), z(14), z(20));\n\n\t\t\tk += 96;\n\t\t} while (--rounds);\n\n#ifdef vtestallones\n\t\tif (precheck) {\n\t\t\tstruct db_password *pw = salt->list;\n\t\t\tdo 
{\n\t\t\t\tuint32_t binary = *(uint32_t *)pw->binary;\n\t\t\t\tfor_each_depth() {\n\t\t\t\t\tuint32_t u = binary;\n\t\t\t\t\tvtype mask = *z(20);\n\t\t\t\t\tif (u & (1 << 20))\n\t\t\t\t\t\tvnot(mask, mask);\n\t\t\t\t\tvtype v = *z(14);\n\t\t\t\t\tif (u & (1 << 14))\n\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\tv = *z(26);\n\t\t\t\t\tif (u & (1 << 26))\n\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\tint bit = 0;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tv = *z(bit);\n\t\t\t\t\t\tif (u & 1)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 1);\n\t\t\t\t\t\tif (u & 2)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 2);\n\t\t\t\t\t\tif (u & 4)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tv = *z(bit + 3);\n\t\t\t\t\t\tif (u & 8)\n\t\t\t\t\t\t\tvnot(v, v);\n\t\t\t\t\t\tvor(mask, mask, v);\n\t\t\t\t\t\tif (vtestallones(mask)) goto next_depth;\n\t\t\t\t\t\tu >>= 4;\n\t\t\t\t\t} while ((bit += 4) <= 28);\n\n#if DES_bs_mt\n\t\t\t\t\tprecheck = 0;\n\t\t\t\t\tgoto next_batch;\n#else\n\t\t\t\t\treturn keys_count;\n\n\nnext_depth:\n\t\t\t\t\t;\n\t\t\t\t}\n\t\t\t} while ((pw = pw->next));\n\t\t}\n#if DES_bs_mt\nnext_batch:\n\t\t;\n\n\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs_b.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, DES_bs_P)", "context_chars": 100, "text": "\n#if DES_bs_mt\n\tint t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;\n#endif\n\n#ifdef _OPENMP\nfor_each_t(n) {\n\t\tARCH_WORD **k;\n\t\tint rounds;\n#if DES_BS_VECTOR\n\t\tint depth;\n\n\t\tint i;\n\n\t\tfor (i = 0; i < 64; i++) {\n#if DES_BS_VECTOR\n\t\t\tfor (depth = 0; depth < DES_BS_VECTOR; depth++)\n\t\t\t\tDES_bs_all.B[i][depth] = DES_bs_P[i][depth];\n#else\n\t\t\tDES_bs_all.B[i] = DES_bs_P[i];\n\n\t\t}\n\n#if DES_bs_mt\n\t\tDES_bs_finalize_keys_plain(t);\n#else\n\t\tDES_bs_finalize_keys_plain();\n\n\n\t\tk = DES_bs_all.KS.p;\n\t\trounds = 8;\n\n\t\tdo {\n\t\t\tfor_each_depth()\n\t\t\ts1(y(31, 0), y(0, 1), y(1, 2),\n\t\t\t\ty(2, 3), y(3, 4), y(4, 5),\n\t\t\t\tz(40), z(48), z(54), z(62));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(3, 6), y(4, 7), y(5, 8),\n\t\t\t\ty(6, 9), y(7, 10), y(8, 11),\n\t\t\t\tz(44), z(59), z(33), z(49));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(7, 12), y(8, 13), y(9, 14),\n\t\t\t\ty(10, 15), y(11, 16), y(12, 17),\n\t\t\t\tz(55), z(47), z(61), z(37));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(11, 18), y(12, 19), y(13, 20),\n\t\t\t\ty(14, 21), y(15, 22), y(16, 23),\n\t\t\t\tz(57), z(51), z(41), z(32));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(15, 24), y(16, 25), y(17, 26),\n\t\t\t\ty(18, 27), y(19, 28), y(20, 29),\n\t\t\t\tz(39), z(45), z(56), z(34));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(19, 30), y(20, 31), y(21, 32),\n\t\t\t\ty(22, 33), y(23, 34), y(24, 35),\n\t\t\t\tz(35), z(60), z(42), z(50));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(23, 36), y(24, 37), y(25, 38),\n\t\t\t\ty(26, 39), y(27, 40), y(28, 41),\n\t\t\t\tz(63), z(43), z(53), z(38));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(27, 42), y(28, 43), y(29, 44),\n\t\t\t\ty(30, 45), y(31, 46), y(0, 47),\n\t\t\t\tz(36), z(58), z(46), 
z(52));\n\n\t\t\tfor_each_depth()\n\t\t\ts1(y(63, 48), y(32, 49), y(33, 50),\n\t\t\t\ty(34, 51), y(35, 52), y(36, 53),\n\t\t\t\tz(8), z(16), z(22), z(30));\n\t\t\tfor_each_depth()\n\t\t\ts2(y(35, 54), y(36, 55), y(37, 56),\n\t\t\t\ty(38, 57), y(39, 58), y(40, 59),\n\t\t\t\tz(12), z(27), z(1), z(17));\n\t\t\tfor_each_depth()\n\t\t\ts3(y(39, 60), y(40, 61), y(41, 62),\n\t\t\t\ty(42, 63), y(43, 64), y(44, 65),\n\t\t\t\tz(23), z(15), z(29), z(5));\n\t\t\tfor_each_depth()\n\t\t\ts4(y(43, 66), y(44, 67), y(45, 68),\n\t\t\t\ty(46, 69), y(47, 70), y(48, 71),\n\t\t\t\tz(25), z(19), z(9), z(0));\n\t\t\tfor_each_depth()\n\t\t\ts5(y(47, 72), y(48, 73), y(49, 74),\n\t\t\t\ty(50, 75), y(51, 76), y(52, 77),\n\t\t\t\tz(7), z(13), z(24), z(2));\n\t\t\tfor_each_depth()\n\t\t\ts6(y(51, 78), y(52, 79), y(53, 80),\n\t\t\t\ty(54, 81), y(55, 82), y(56, 83),\n\t\t\t\tz(3), z(28), z(10), z(18));\n\t\t\tfor_each_depth()\n\t\t\ts7(y(55, 84), y(56, 85), y(57, 86),\n\t\t\t\ty(58, 87), y(59, 88), y(60, 89),\n\t\t\t\tz(31), z(11), z(21), z(6));\n\t\t\tfor_each_depth()\n\t\t\ts8(y(59, 90), y(60, 91), y(61, 92),\n\t\t\t\ty(62, 93), y(63, 94), y(32, 95),\n\t\t\t\tz(4), z(26), z(14), z(20));\n\n\t\t\tk += 96;\n\t\t} while (--rounds);\n\t} #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, DES_bs_P)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sip_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t/* password */\n\t\tMD5_CTX md5_ctx;\n\t\tunsigned char md5_bin_hash[MD5_LEN];\n\t\tchar dynamic_hash[MD5_LEN_HEX+1];\n\n\t\t/* Generate dynamic hash including pw (see above) */\n\t\t//MD5_Init(&md5_ctx);\n\t\t//MD5_Update(&md5_ctx, (unsigned char*)pSalt->dynamic_hash_data, pSalt->dynamic_hash_data_len);\n\t\t// salt.ctx_dyna_data contains the ctx already loaded.\n\t\tmemcpy(&md5_ctx, &(pSalt->ctx_dyna_data), sizeof(md5_ctx));\n\n\t\tMD5_Update(&md5_ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Final(md5_bin_hash, &md5_ctx);\n\t\tbin_to_hex(bin2hex_table, md5_bin_hash, MD5_LEN, dynamic_hash, MD5_LEN_HEX);\n\n\t\t/* Generate digest response hash */\n\t\tMD5_Init(&md5_ctx);\n\t\tMD5_Update(&md5_ctx, (unsigned char*)dynamic_hash, MD5_LEN_HEX);\n\t\tMD5_Update(&md5_ctx, (unsigned char*)pSalt->static_hash_data, pSalt->static_hash_data_len);\n\t\tMD5_Final((unsigned char*)crypt_key[index], &md5_ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/zed_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "if defined(SIMD_COEF_32)\n\t\tinc = SSE_GROUP_SZ_SHA256;\n#else\n\t\talgo = 256;\n#endif\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n#if !defined(SIMD_COEF_32)\n\t\tpkcs12_pbe_derive_key(algo, cur_salt->iteration_count,\n\t\t MBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t (unsigned char*)saved_key[index],\n\t\t saved_len[index], cur_salt->salt,\n\t\t salt_len,\n\t\t (unsigned char*)crypt_out[index],\n\t\t BINARY_SIZE);\n#else\n\t\tint j;\n\n\t\tif (cur_salt->algo == 21) {\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA1];\n\t\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA1];\n\t\t\tsize_t lens[SSE_GROUP_SZ_SHA1];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; j++) {\n\t\t\t\tmackey[j] = (unsigned char*)(crypt_out[index + 
j]);\n\t\t\t\tlens[j] = saved_len[index + j];\n\t\t\t\tkeys[j] = (const unsigned char*)(saved_key[index + j]);\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha1(cur_salt->iteration_count,\n\t\t\t MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys,\n\t\t\t lens, cur_salt->salt,\n\t\t\t salt_len, mackey,\n\t\t\t BINARY_SIZE);\n\t\t} else if (cur_salt->algo == 22) {\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA256];\n\t\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA256];\n\t\t\tsize_t lens[SSE_GROUP_SZ_SHA256];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; j++) {\n\t\t\t\tmackey[j] = (unsigned char*)(crypt_out[index + j]);\n\t\t\t\tlens[j] = saved_len[index + j];\n\t\t\t\tkeys[j] = (const unsigned char*)(saved_key[index + j]);\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha256(cur_salt->iteration_count,\n\t\t\t MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys,\n\t\t\t lens, cur_salt->salt,\n\t\t\t salt_len, mackey,\n\t\t\t BINARY_SIZE);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pst_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; ++i) {\n\t\tCRC32_t crc = 0;\n\t\tunsigned char *p = (unsigned char*)saved_key[i];\n\t\twhile (*p)\n\t\t\tcrc = jtr_crc32(crc, *p++);\n\t\tcrypt_out[i] = crc;\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/notes_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][16];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 16, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 16, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tcracked[index+i] = notes_decrypt(cur_salt, master[i]);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/andotp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (check_password(index, cur_salt)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawSHA224_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; 
index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tSIMDSHA256body(&saved_key[(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32],\n\t\t &crypt_out[(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32],\n\t\t NULL, SSEi_REVERSE_STEPS|SSEi_MIXED_IN|SSEi_CRYPT_SHA224);\n#else\n\t\tSHA256_CTX ctx;\n\t\tSHA224_Init(&ctx);\n\t\tSHA224_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA224_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dynamic_fmt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "fdef _OPENMP\n\tint i;\n\tunsigned int inc = OMP_MD5_INC;\n//\tif (dynamic_use_sse!=1)\n//\t\tinc = OMP_INC;\nfor (i = 0; i < m_count; i += inc)\n\t\tDynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());\n#else\n\tDynamicFunc__crypt2_md5();\n\n}\n\nstatic void __nonMP_DynamicFunc__clean_input()\n{\n\tunsigned int i=0;\n#ifdef SIMD_COEF_32\n\tif (dynamic_use_sse==1) {\n\t\tmemset(input_buf, 0, MMX_INP_BUF_SZ);\n\t\tmemset(total_len, 0, MMX_TOT_LEN_SZ);\n\t\treturn;\n\t}\n\n\tfor (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {\n\t\t//if (total_len_X86[i]) {\n#if MD5_X2\n\t\t\tif (i&1)\n\t\t\t\tmemset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));\n\t\t\telse\n\n\t\t\tmemset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));\n\t\t\ttotal_len_X86[i] = 0;\n\t\t//}\n\t}\n\treturn;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dynamic_fmt.c", "omp_pragma_line": "#pragma omp parallel for shared(curdat, inc, m_count)", "context_chars": 100, "text": "ularity);\n\t\tinc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;\nfor (j = 0; j < m_count; j += inc) {\n\t\t\tunsigned int i;\n\t\t\tunsigned int top=j+inc;\n\t\t\t/* The last block may 'appear' to have more keys than we have in the\n\t\t\t entire buffer space. This is due to the granularity. If so,\n\t\t\t reduce that last one to stop at end of our buffers. NOT doing\n\t\t\t this is causes a huge buffer overflow. 
*/\n\t\t\tif (top > curdat.pFmtMain->params.max_keys_per_crypt)\n\t\t\t\ttop = curdat.pFmtMain->params.max_keys_per_crypt;\n\n\t\t\t// we now run a full script in this thread, using only a subset of\n\t\t\t// the data, from [j,top) The next thread will run from [top,top+inc)\n\t\t\t// each thread will take the next inc values, until we get to m_count\n\t\t\tfor (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)\n\t\t\t\t(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());\n\t\t} #pragma omp parallel for shared(curdat, inc, m_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/lastpass_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tuint32_t key[MIN_KEYS_PER_CRYPT][8];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\tunion {\n\t\t\tuint32_t *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tx.pout[i] = key[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, &(x.poutc), 32, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tpbkdf2_sha256((unsigned char*)saved_key[i+index], strlen(saved_key[i+index]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, (unsigned char*)key[i], 32, 0);\n\t\t}\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tAES_KEY akey;\n\n\t\t\tAES_set_encrypt_key((unsigned char*)key[i], 256, &akey);\n\t\t\tAES_ecb_encrypt((unsigned char*)\"lastpass rocks\\x02\\x02\", (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawSHA512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tSIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64],\n\t\t &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64],\n\t\t NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);\n#else\n\t\tSHA512_CTX ctx;\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/palshop_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char m1[53], buffer[16+20], *cp;\n\t\tint i;\n\t\tMD5_CTX mctx;\n\t\tSHA_CTX sctx;\n\n\t\t// m1 = md5($p)\n\t\tMD5_Init(&mctx);\n\t\tMD5_Update(&mctx, saved_key[index], saved_len[index]);\n\t\tMD5_Final(buffer, &mctx);\n\n\t\t// s1 = sha1($p)\n\t\tSHA1_Init(&sctx);\n\t\tSHA1_Update(&sctx, saved_key[index], saved_len[index]);\n\t\tSHA1_Final(buffer+16, &sctx);\n\n\t\t// data = m1[11:] + s1[:29] + m1[0:1] // 51 
bytes!\n\t\tcp = m1;\n\t\t*cp++ = itoa16[buffer[5]&0xF];\n\t\tfor (i = 6; i < 25+6; ++i) {\n\t\t\tcp[0] = itoa16[buffer[i]>>4];\n\t\t\tcp[1] = itoa16[buffer[i]&0xF];\n\t\t\tcp += 2;\n\t\t}\n\t\tcp[-1] = itoa16[buffer[0]>>4];\n\n\n\t\t// m2\n\t\tMD5_Init(&mctx);\n\t\tMD5_Update(&mctx, m1, 51);\n\t\tMD5_Final(buffer, &mctx);\n\n\t\t// s2 = sha1(data)\n\t\t// SHA1_Init(&sctx);\n\t\t// SHA1_Update(&sctx, data, 51);\n\t\t// SHA1_Final((unsigned char*)crypt_out[index], &sctx);\n\t\t// hex_encode((unsigned char*)crypt_out[index], 20, s1);\n\n\t\t// hash = m2[11:] + s2[:29] + m2[0], but starting 20 bytes should be enough!\n\t\t//memcpy((unsigned char*)crypt_out[index], m2 + 11, 20);\n\n\t\t// we actually take m2[12:32] (skipping that first 'odd' byte.0\n\t\t// in binary now, skipping the unneeded hex conversion.\n\t\tmemcpy((unsigned char*)crypt_out[index], buffer+6, 10);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/racf_kdfaes_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rypt_all(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tint x, i, n, n_key, ml;\n\t\tchar mac1[32] = { 0 };\n\t\tchar t1[32] = { 0 };\n\t\tunsigned char key[32];\n\t\tunsigned char m[MAX_SALT_SIZE + HASH_OUTPUT_SIZE + 32];\n\t\tunsigned char *t1f = mem_alloc(HASH_OUTPUT_SIZE * cur_salt->mfact);\n\t\tunsigned char *h_out = (unsigned char*)crypt_out[index];\n\t\tunsigned char plaint[16];\n\t\tAES_KEY akey;\n\t\tunsigned char zeroiv[16];\n\t\tunsigned char dh[8];\n\n\t\tml = cur_salt->length;\n\t\tmemset(key, 0, sizeof(key));\n\t\tmemcpy(m, cur_salt->salt, ml);\n\n\t\t// get des hash\n\t\tget_des_hash(saved_key[index], dh); // k1\n\n\t\t// kdf\n\t\tfor (n = 0; n < cur_salt->mfact; n++) {\n\t\t\tJTR_hmac_sha256(dh, 8, m, ml, h_out, HASH_OUTPUT_SIZE);\n\n\t\t\tml = 32;\n\t\t\tmemcpy(t1, h_out, HASH_OUTPUT_SIZE);\n\t\t\tfor (x = 0; x < (cur_salt->rfact*100)-1 ; x++) {\n\t\t\t\tmemcpy(mac1, h_out, HASH_OUTPUT_SIZE);\n\t\t\t\tJTR_hmac_sha256(dh, 8, h_out, ml, h_out, HASH_OUTPUT_SIZE);\n\t\t\t\tfor (i = 0; i < HASH_OUTPUT_SIZE; i++)\n\t\t\t\t\tt1[i] ^= h_out[i];\n\t\t\t}\n\n\t\t\tmemcpy(m, mac1, 16);\n\t\t\tmemcpy(m+16, t1, HASH_OUTPUT_SIZE);\n\t\t\tmemcpy(m+48, \"\\x00\\x00\\x00\\x01\", 4);\n\t\t\tml = 52;\n\t\t\tmemcpy(t1f+(n*HASH_OUTPUT_SIZE), t1, HASH_OUTPUT_SIZE);\n\t\t}\n\n\t\tmemcpy(key, t1, 32);\n\n\t\tfor (n = 0; n < cur_salt->mfact; n++) {\n\t\t\tn_key = (((uint32_t)key[30] << 8) | key[31]) & (cur_salt->mfact - 1);\n\t\t\tmemcpy(m, t1f + (n_key * HASH_OUTPUT_SIZE), HASH_OUTPUT_SIZE);\n\t\t\tmemcpy(m + HASH_OUTPUT_SIZE, \"\\x00\\x00\\x00\\x01\", 4);\n\t\t\tJTR_hmac_sha256(key, HASH_OUTPUT_SIZE, m, HASH_OUTPUT_SIZE + 4, h_out, HASH_OUTPUT_SIZE);\n\t\t\tmemcpy(t1f + (n*HASH_OUTPUT_SIZE), h_out, HASH_OUTPUT_SIZE);\n\t\t\tmemcpy(key, h_out, HASH_OUTPUT_SIZE);\n\t\t}\n\n\t\tmemcpy(t1f + (HASH_OUTPUT_SIZE * (cur_salt->mfact-1)), \"\\x00\\x00\\x00\\x01\", 4);\n\t\tml = (HASH_OUTPUT_SIZE * (cur_salt->mfact-1))+4;\n\t\tJTR_hmac_sha256(key, HASH_OUTPUT_SIZE, t1f, ml, h_out, HASH_OUTPUT_SIZE);\n\n\t\tml = 32;\n\t\tmemcpy(t1, h_out, HASH_OUTPUT_SIZE);\n\t\tfor (x = 0; x < (cur_salt->rfact*100)-1; x++) {\n\t\t\tJTR_hmac_sha256(key, HASH_OUTPUT_SIZE, h_out, ml, h_out, HASH_OUTPUT_SIZE);\n\t\t\tfor (i = 0; i < HASH_OUTPUT_SIZE; i++)\n\t\t\t\tt1[i] ^= h_out[i];\n\t\t}\n\t\tmemcpy(h_out, t1, HASH_OUTPUT_SIZE);\n\n\t\t// encrypt 
user name\n\t\tmemset(plaint, '\\x00', sizeof(plaint));\n\t\tmemcpy(plaint, cur_salt->userid, 8);\n\t\tmemset(zeroiv, 0, 16);\n\t\tAES_set_encrypt_key((unsigned char*)crypt_out[index], 256, &akey);\n\t\tAES_cbc_encrypt(plaint, (unsigned char*)crypt_out[index], 16, &akey, zeroiv, AES_ENCRYPT);\n\n\t\tMEM_FREE(t1f);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/o5logon_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ny_cracked) {\n\t\tmemset(cracked, 0, sizeof(*cracked) * count);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[24];\n\t\tunsigned char iv[16];\n\t\tSHA_CTX ctx;\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA1_Update(&ctx, cur_salt->salt, 10);\n\t\tSHA1_Final(key, &ctx);\n\t\tmemset(key + 20, 0, 4);\n\n\t\tif (cur_salt->pw_len) {\n\t\t\tint i;\n\t\t\tunsigned char s_secret[48];\n\t\t\tunsigned char c_secret[48];\n\t\t\tunsigned char combined_sk[24];\n\t\t\tunsigned char final_key[32];\n\t\t\tunsigned char password[16 + PLAINTEXT_LENGTH + 16];\n\t\t\tchar *dec_pw = (char*)password + 16;\n\t\t\tint blen = (saved_len[index] + 15) / 16;\n\t\t\tMD5_CTX ctx;\n\n\t\t\tif (cur_salt->pw_len == blen) {\n\t\t\t\tmemset(iv, 0, 16);\n\t\t\t\taesDec(cur_salt->ct, s_secret, key, 3, iv);\n\n\t\t\t\tmemset(iv, 0, 16);\n\t\t\t\taesDec(cur_salt->csk, c_secret, key, 3, iv);\n\n\t\t\t\tfor (i = 0; i < 24; i++)\n\t\t\t\t\tcombined_sk[i] = s_secret[16 + i] ^ c_secret[16 + i];\n\n\t\t\t\tMD5_Init(&ctx);\n\t\t\t\tMD5_Update(&ctx, combined_sk, 16);\n\t\t\t\tMD5_Final(final_key, &ctx);\n\t\t\t\tMD5_Init(&ctx);\n\t\t\t\tMD5_Update(&ctx, combined_sk + 16, 8);\n\t\t\t\tMD5_Final(final_key + 16, &ctx);\n\n\t\t\t\tmemset(iv, 0, 16);\n\t\t\t\taesDec(cur_salt->pw, password, final_key,\n\t\t\t\t cur_salt->pw_len + 1, iv);\n\n\t\t\t\tif (!memcmp(dec_pw, saved_key[index], saved_len[index]))\n\t\t\t\t{\n\t\t\t\t\tchar *p = dec_pw + 16 * blen - 1;\n\t\t\t\t\tint n, pad;\n\t\t\t\t\tint res = 1;\n\n\t\t\t\t\tn = pad = *p;\n\t\t\t\t\twhile (n--) {\n\t\t\t\t\t\tif (*p-- != pad) {\n\t\t\t\t\t\t\tres = 0;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (res) {\n\t\t\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tunsigned char pt[16];\n\n\t\t\tmemcpy(iv, cur_salt->ct + 16, 16);\n\t\t\taesDec(cur_salt->ct + 32, pt, key, 1, iv);\n\n\t\t\tif (!memcmp(pt + 8, \"\\x08\\x08\\x08\\x08\\x08\\x08\\x08\\x08\", 8))\n\t\t\t{\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_diskcryptor_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " NULL, multi_profilingEvent[3]), \"Copy result back\");\n\n\tif (!ocl_autotune_running) {\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t\tuint64_t u[8];\n\t\t\tunsigned char static_privkey[64];\n\t\t\tint j, success;\n\n\t\t\tmemcpy(u, host_crack[index].hash, 64);\n\t\t\tfor (j = 0; j < 8; j++)\n\t\t\t\tu[j] = JOHNSWAP64(u[j]);\n\t\t\tmemcpy(static_privkey, u, 64);\n\n\t\t\tsuccess = diskcryptor_decrypt_data(static_privkey, cur_salt);\n\n\t\t\tif (success) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp 
atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/iwork_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][16];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, fctx->salt, fctx->salt_length, fctx->iterations, pout, 16, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), fctx->salt, fctx->salt_length, fctx->iterations, master[i], 16, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tcracked[index+i] = iwork_decrypt(fctx, master[i], fctx->iv, fctx->blob);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawMD5flat_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0;\n#ifdef SIMD_COEF_32\n\tconst int inc = NBKEYS;\n#else\n\tconst int inc = 1;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n#if SIMD_COEF_32\n\t\tSIMDmd5body(saved_key[index], crypt_key[index/NBKEYS], NULL, SSEi_FLAT_IN);\n#else\n\t\tMD5_CTX ctx;\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Final((unsigned char *)crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/nukedclan_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char pass[40+1];\n\t\tunsigned char out[80];\n\t\tint i, k;\n\t\tint idx = 0;\n\t\tMD5_CTX c;\n\t\tSHA_CTX ctx;\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA1_Final(out, &ctx);\n\t\thex_encode(out, 20, pass);\n\t\tfor (i = 0, k=cur_salt->decal; i < 40; ++i, ++k) {\n\t\t\tout[idx++] = pass[i];\n\t\t\tif (k>19) k = 0;\n\t\t\tout[idx++] = cur_salt->HASHKEY[k];\n\t\t}\n\t\tMD5_Init(&c);\n\t\tMD5_Update(&c, out, 80);\n\t\tMD5_Final((unsigned char*)crypt_out[index], &c);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rar5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA256\n\t\tint lens[SSE_GROUP_SZ_SHA256], i, j;\n\t\tunsigned char PswCheck[SIZE_PSWCHECK],\n\t\t PswCheckValue[SSE_GROUP_SZ_SHA256][SHA256_DIGEST_SIZE];\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA256];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA256];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 
0; i < SSE_GROUP_SZ_SHA256; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = (uint32_t*)PswCheckValue[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, &(x.poutc), SHA256_DIGEST_SIZE, 0);\n\t\t// special wtf processing\n\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {\n\t\t\tmemset(PswCheck, 0, sizeof(PswCheck));\n\t\t\tfor (i = 0; i < SHA256_DIGEST_SIZE; i++)\n\t\t\t\tPswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[j][i];\n\t\t\tmemcpy((void*)crypt_out[index+j], PswCheck, SIZE_PSWCHECK);\n\t\t}\n#else\n\t\tunsigned char PswCheckValue[SHA256_DIGEST_SIZE];\n\t\tunsigned char PswCheck[SIZE_PSWCHECK];\n\t\tint i;\n\t\tpbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, PswCheckValue, SHA256_DIGEST_SIZE, 0);\n\t\t// special wtf processing\n\t\tmemset(PswCheck, 0, sizeof(PswCheck));\n\t\tfor (i = 0; i < SHA256_DIGEST_SIZE; i++)\n\t\t\tPswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[i];\n\t\tmemcpy((void*)crypt_out[index], PswCheck, SIZE_PSWCHECK);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/encdatavault_md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n\tnb_keys = 1 << (cur_salt->algo_id - 1);\n\n#ifdef _OPENMP\n\tfor (index = 0; index < count; index += MIN_KEYS_PER_MD5_CRYPT) {\n\t\tint i, j;\n\t\tMD5_CTX ctx;\n\t\tbuffer_128 kdf_out[MIN_KEYS_PER_MD5_CRYPT][ENC_MAX_KEY_NUM];\n\t\tbuffer_128 hash;\n\t\tbuffer_128 tmp;\n\t\tbuffer_128 ivs[ENC_MAX_KEY_NUM];\n\t\tunsigned char result[ENC_KEY_SIZE * ENC_MAX_KEY_NUM] = { 0 };\n\n\t\t// Key derivation based on MD5\n\t\tfor (i = 0; i < MIN_KEYS_PER_MD5_CRYPT; ++i) {\n\t\t\ttmp.u64[0] = 0;\n\t\t\ttmp.u64[1] = 0;\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, saved_key[index + i], strlen(saved_key[index + i]));\n\t\t\tMD5_Final(hash.u8, &ctx);\n\n\t\t\tfor (j = 0; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\tmemcpy(kdf_out[i][j].u8, default_salts[j], ENC_SALT_SIZE);\n\t\t\t}\n\n\t\t\tfor (j = 1; j < ENC_DEFAULT_MD5_ITERATIONS; j++) {\n\t\t\t\tMD5_Init(&ctx);\n\t\t\t\tMD5_Update(&ctx, hash.u8, 16);\n\t\t\t\tMD5_Final(hash.u8, &ctx);\n\t\t\t\tenc_xor_block(tmp.u64, hash.u64);\n\t\t\t}\n\n\t\t\tfor (j = 0; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\tenc_xor_block(kdf_out[i][j].u64, tmp.u64);\n\t\t\t}\n\t\t}\n\t\t/* AES iterated CTR */\n\t\tfor (i = 0; i < MIN_KEYS_PER_MD5_CRYPT; ++i) {\n\t\t\tif ((cur_salt->version & 0x0f) == 1) {\n\t\t\t\tmemcpy(ivs[0].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\tfor (j = 1; j < nb_keys; j++) {\n\t\t\t\t\tmemcpy(ivs[j].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\t\tivs[j].u64[0] ^= kdf_out[i][j].u64[0];\n\t\t\t\t}\n\t\t\t\t// result buffer is used here to hold the decrypted data.\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->encrypted_data, result, kdf_out[i][0].u8, ivs, AES_BLOCK_SIZE,\n\t\t\t\t nb_keys, 1);\n\t\t\t\tif (!memcmp(result + 4, \"\\xd2\\xc3\\xb4\\xa1\\x00\\x00\", MIN(cur_salt->encrypted_data_length, ENC_SIG_SIZE - 2))) {\n\t\t\t\t\tcracked[index + i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Decrypt keychain\n\t\t\t\tivs[0].u64[0] = 0;\n\t\t\t\tfor (j = 1; j < ENC_MAX_KEY_NUM; j++) {\n\t\t\t\t\tivs[j].u64[0] = kdf_out[i][ENC_MAX_KEY_NUM - j].u64[0];\n\t\t\t\t}\n\t\t\t\t// result 
buffer is used for the decrypted keys from the keychain\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->keychain, result, kdf_out[i][0].u8, ivs, ENC_KEYCHAIN_SIZE,\n\t\t\t\t ENC_MAX_KEY_NUM, 0);\n\n\t\t\t\t// Decrypt data\n\t\t\t\tmemcpy(ivs[0].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\tfor (j = 1; j < nb_keys; j++) {\n\t\t\t\t\tmemcpy(ivs[j].u8, cur_salt->iv, ENC_NONCE_SIZE);\n\t\t\t\t\tmemcpy(tmp.u8, result + j * 16, ENC_NONCE_SIZE);\n\t\t\t\t\tivs[j].u64[0] ^= tmp.u64[0];\n\t\t\t\t}\n\t\t\t\t// result buffer is reused here to hold the decrypted data.\n\t\t\t\tenc_aes_ctr_iterated(cur_salt->encrypted_data, result, result, ivs, AES_BLOCK_SIZE, nb_keys, 1);\n\t\t\t\tif (!memcmp(result + 4, \"\\xd2\\xc3\\xb4\\xa1\\x00\\x00\", MIN(cur_salt->encrypted_data_length, ENC_SIG_SIZE - 2))) {\n\t\t\t\t\tcracked[index + i] = 1;\n#ifdef _OPENMP\n\t\t\t\t\t#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/openbsdsoftraid_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tAES_KEY akey;\n\t\tunsigned char mask_key[MIN_KEYS_PER_CRYPT][32];\n\t\tunsigned char unmasked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];\n\t\tunsigned char hashed_mask_key[20];\n\t\tint i, j;\n\n\t\t/* derive masking key from password */\n\t\tif (cur_salt->kdf_type == 1) {\n#ifdef SSE_GROUP_SZ_SHA1\n\t\t\tint lens[SSE_GROUP_SZ_SHA1];\n\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA1], *pout[SSE_GROUP_SZ_SHA1];\n\t\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\t\tlens[i] = strlen(key_buffer[index+i]);\n\t\t\t\tpin[i] = (unsigned char*)key_buffer[index+i];\n\t\t\t\tpout[i] = mask_key[i];\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t\t\t\tcur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,\n\t\t\t\t\tcur_salt->num_iterations, (unsigned char**)pout,\n\t\t\t\t\t32, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpbkdf2_sha1((const unsigned char*)(key_buffer[index+i]),\n\t\t\t\t\t\tstrlen(key_buffer[index+i]),\n\t\t\t\t\t\tcur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,\n\t\t\t\t\t\tcur_salt->num_iterations, mask_key[i],\n\t\t\t\t\t\t32, 0);\n\t\t\t}\n\n\t\t} else if (cur_salt->kdf_type == 3) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tbcrypt_pbkdf((const char*)key_buffer[index+i],\n\t\t\t\t\t\tstrlen(key_buffer[index+i]),\n\t\t\t\t\t\tcur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,\n\t\t\t\t\t\tmask_key[i], 32, cur_salt->num_iterations);\n\t\t\t}\n\t\t}\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t/* decrypt sector keys */\n\t\t\tAES_set_decrypt_key(mask_key[i], 256, &akey);\n\t\t\tfor (j = 0; j < (OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS) / 16; j++) {\n\t\t\t\tAES_decrypt(&cur_salt->masked_keys[16*j], &unmasked_keys[16*j], &akey);\n\t\t\t}\n\n\t\t\t/* get SHA1 of mask_key */\n\t\t\tSHA_CTX ctx;\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, mask_key[i], 32);\n\t\t\tSHA1_Final(hashed_mask_key, &ctx);\n\n\t\t\thmac_sha1(hashed_mask_key, OPENBSD_SOFTRAID_MACLENGTH,\n\t\t\t\t\tunmasked_keys, OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS,\n\t\t\t\t\t(unsigned char*)crypt_out[index+i], 20);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/axcrypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\t/*\n\t\t * NUMBER_AES_BLOCKS = 2\n\t\t * AES_BLOCK_SIZE = 16\n\t\t */\n\n\t\tunsigned char KEK[32];\n\t\tAES_KEY akey;\n\t\tint i;\n\n\t\tif (cur_salt->version == 1) {\n\t\t\t// See axcrypt/AxCryptCommon/CAes.cpp (CAesWrap::UnWrap) and axcrypt/AxCrypt/CSha1.cpp (CSha1::GetKeyHash)\n\t\t\t// from AxCrypt-1.7.3180.0-Source.zip file. V1KeyWrap1HeaderBlock.cs, V1KeyWrap1HeaderBlock.cs and\n\t\t\t// V1AxCryptDocument.cs from https://bitbucket.org/axantum/axcrypt-net are also relevant.\n\t\t\tunion {\n\t\t\t\tunsigned char b[16];\n\t\t\t\tuint32_t w[4];\n\t\t\t} lsb;\n\n\t\t\tunion {\n\t\t\t\tunsigned char b[16];\n\t\t\t\tuint32_t w[4];\n\t\t\t} cipher;\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tSHA_CTX ctx;\n\n\t\t\t\tint k, j, nb_iterations = cur_salt->key_wrapping_rounds;\n\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\t\t/* if key-file provided */\n\t\t\t\tif (cur_salt->keyfile != NULL)\n\t\t\t\t\tSHA1_Update(&ctx, (unsigned char *) cur_salt->keyfile, strlen(cur_salt->keyfile));\n\t\t\t\tSHA1_Final(KEK, &ctx);\n\n\t\t\t\t/* hash XOR salt => KEK */\n\t\t\t\tfor (k = 0; k < 16; k++)\n\t\t\t\t\tKEK[k] ^= cur_salt->salt[k];\n\n\t\t\t\tmemcpy(lsb.b, cur_salt->wrappedkey + 8, 16);\n\n\t\t\t\tAES_set_decrypt_key(KEK, 128, &akey);\n\n\t\t\t\t/* set msb */\n\t\t\t\tmemcpy(cipher.b, cur_salt->wrappedkey, 8);\n\n\t\t\t\t/* custom AES un-wrapping loop */\n\t\t\t\tfor (j = nb_iterations - 1; j >= 0; j--) {\n\n\t\t\t\t\t/* 1st block treatment */\n\t\t\t\t\t/* MSB XOR (NUMBER_AES_BLOCKS * j + i) */\n\t\t\t\t\tPUT_64BITS_XOR_MSB(cipher.b, 2 * j + 2);\n\t\t\t\t\t/* R[i] */\n\t\t\t\t\tcipher.w[2] = lsb.w[2];\n\t\t\t\t\tcipher.w[3] = lsb.w[3];\n\t\t\t\t\t/* AES_ECB(KEK, (MSB XOR (NUMBER_AES_BLOCKS * j + i)) | R[i]) */\n\t\t\t\t\tAES_decrypt(cipher.b, cipher.b, &akey);\n\t\t\t\t\tlsb.w[2] = cipher.w[2];\n\t\t\t\t\tlsb.w[3] = cipher.w[3];\n\n\t\t\t\t\t/* 2nd block treatment */\n\t\t\t\t\tPUT_64BITS_XOR_MSB(cipher.b, 2 * j + 1);\n\t\t\t\t\tcipher.w[2] = lsb.w[0];\n\t\t\t\t\tcipher.w[3] = lsb.w[1];\n\t\t\t\t\tAES_decrypt(cipher.b, cipher.b, &akey);\n\t\t\t\t\tlsb.w[0] = cipher.w[2];\n\t\t\t\t\tlsb.w[1] = cipher.w[3];\n\t\t\t\t}\n\t\t\t\tif (!memcmp(cipher.b, AES_WRAPPING_IV, 8)) {\n\t\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if (cur_salt->version == 2) {\n\t\t\t// See V2AxCryptDocument.cs for internal crypto details\n\t\t\tunsigned char seed[MIN_KEYS_PER_CRYPT][64];\n\t\t\tint i;\n#ifdef SIMD_COEF_64\n\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\t\tpout[i] = seed[i];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->deriv_salt, 32, cur_salt->deriv_iterations, pout, 64, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tpbkdf2_sha512((unsigned char*)saved_key[index+i], strlen(saved_key[index+i]), 
cur_salt->deriv_salt, 32, cur_salt->deriv_iterations, seed[i], 64, 0);\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tint k, j, nb_iterations = cur_salt->key_wrapping_rounds;\n\t\t\t\tint halfblocklen = 16 / 2;\n\t\t\t\tint wrappedkeylen = 56 - halfblocklen;\n\t\t\t\tunsigned char wrapped[144];\n\t\t\t\tunsigned char block[16];\n\t\t\t\tint t;\n\n\t\t\t\t/* ByteArrayExtensions -> Reduce */\n\t\t\t\tmemset(KEK, 0, 32);\n\t\t\t\tfor (k = 0; k < 64 ; k++)\n\t\t\t\t\tKEK[k % 32] ^= seed[i][k];\n\n\t\t\t\t/* hash XOR salt => KEK */\n\t\t\t\tfor (k = 0; k < 32; k++)\n\t\t\t\t\tKEK[k] = KEK[k] ^ cur_salt->salt[k];\n\n\t\t\t\tAES_set_decrypt_key(KEK, 256, &akey);\n\t\t\t\tmemcpy(wrapped, cur_salt->wrappedkey, 56);\n\n\t\t\t\t/* custom AES un-wrapping loop */\n\t\t\t\tfor (j = nb_iterations - 1; j >= 0; j--) {\n\t\t\t\t\tfor (k = wrappedkeylen / halfblocklen; k >= 1; --k) {\n\t\t\t\t\t\tt = ((wrappedkeylen / halfblocklen) * j) + k;\n\t\t\t\t\t\t// MSB(B) = A XOR t\n\t\t\t\t\t\tmemcpy(block, wrapped, halfblocklen);\n\t\t\t\t\t\tPUT_64BITS_XOR_LSB(block, t);\n\t\t\t\t\t\t// LSB(B) = R[i]\n\t\t\t\t\t\tmemcpy(block + halfblocklen, wrapped + k * halfblocklen, halfblocklen);\n\t\t\t\t\t\t// B = AESD(K, X xor t | R[i]) where t = (n * j) + i\n\t\t\t\t\t\tAES_decrypt(block, block, &akey);\n\t\t\t\t\t\t// A = MSB(B)\n\t\t\t\t\t\tmemcpy(wrapped, block, halfblocklen);\n\t\t\t\t\t\t// R[i] = LSB(B)\n\t\t\t\t\t\tmemcpy(wrapped + k * halfblocklen, block + halfblocklen, halfblocklen);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (!memcmp(wrapped, AES_WRAPPING_IV, 8)) {\n\t\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/kwallet_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char key[MIN_KEYS_PER_CRYPT][56]; /* 56 seems to be the max. 
key size */\n\t\tint key_size[MIN_KEYS_PER_CRYPT];\n\t\tint i;\n\n\t\tif (cur_salt->kwallet_minor_version == 0) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpassword2hash(saved_key[index+i], key[i], &key_size[i]);\n\t\t\t\tcracked[index+i] = !verify_key(key[i], key_size[i]);\n\t\t\t}\n\t\t} else if (cur_salt->kwallet_minor_version == 1) {\n#ifdef SIMD_COEF_64\n\t\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tpout[i] = key[i];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned char **)pin, len, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 56, 0);\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpbkdf2_sha512((const unsigned char*)(saved_key[index+i]),\n\t\t\t\t\tstrlen(saved_key[index+i]), cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, cur_salt->iterations,\n\t\t\t\t\tkey[i], 56, 0);\n\t\t\t}\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\t\tcracked[index+i] = !verify_key(key[i], 56);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pkzip_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(idx)", "context_chars": 100, "text": "y globbing many tests into a threads working set will flatten out these differences.\n#ifdef _OPENMP\nfor (idx = 0; idx < _count; ++idx) {\n\t\tint cur_hash_count = salt->cnt;\n\t\tint cur_hash_idx = -1;\n\t\tMY_WORD key0, key1, key2;\n\t\tu8 C;\n\t\tconst u8 *b;\n\t\tu8 curDecryBuf[256];\n#if USE_PKZIP_MAGIC\n\t\tu8 curInfBuf[128];\n\n\t\tint k, SigChecked;\n\t\tu16 e, v1, v2;\n\t\tz_stream strm;\n\t\tint ret;\n\n\t\t/* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. */\n\n\t\tif (dirty) {\n\t\t\tu8 *p = (u8*)saved_key[idx];\n\n\t\t\t/* load the 'pwkey' one time, put it into the K12 array */\n\t\t\tkey0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL;\n\t\t\tdo {\n\t\t\t\tkey0.u = jtr_crc32 (key0.u, *p++);\n\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t} while (*p);\n\t\t\tK12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u;\n\t\t\tgoto SkipKeyLoadInit;\n\t\t}\n\n\t\tdo\n\t\t{\n\t\t\t// 2nd, and later times through the loop, AND if keys are not dirty (i.e. 
multiple salts\n\t\t\t// for the same key load), we do NOT perform the key compute, but instead load the pre-computed\n\t\t\t// key data from the array.\n\t\t\tkey0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2];\n\n\t\tSkipKeyLoadInit:;\n\t\t\tb = salt->H[++cur_hash_idx].h;\n\t\t\tk=11;\n\t\t\te = salt->H[cur_hash_idx].c;\n\n\t\t\tdo\n\t\t\t{\n\t\t\t\tC = PKZ_MULT(*b++,key2);\n\t\t\t\tkey0.u = jtr_crc32 (key0.u, C);\n\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t}\n\t\t\twhile(--k);\n\n\t\t\tif (salt->H[cur_hash_idx].type == 2) {\n\t\t\t\tu16 e2 = salt->H[cur_hash_idx].c2;\n\n\t\t\t\tif (salt->chk_bytes == 2 && C != (e & 0xff) && C != (e2 & 0xff))\n\t\t\t\t\tgoto Failed_Bailout;\n\n\t\t\t\tC = PKZ_MULT(*b++, key2);\n\n\t\t\t\tif (C != (e >> 8) && C != (e2 >> 8))\n\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t} else {\n\t\t\t\tif (salt->chk_bytes == 2 && C != (e & 0xff))\n\t\t\t\t\tgoto Failed_Bailout;\n\n\t\t\t\tC = PKZ_MULT(*b++, key2);\n\n\t\t\t\tif (C != (e >> 8))\n\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t}\n\n\t\t\t// Now, update the key data (with that last byte.\n\t\t\tkey0.u = jtr_crc32 (key0.u, C);\n\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\n\t\t\t// Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work.\n\t\t\t// What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single\n\t\t\t// 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then\n\t\t\t// we do that here. Also, if the inflate code is a 0 (stored block), and we do sig check, then\n\t\t\t// we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have\n\t\t\t// to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type\n\t\t\t// 2 or 3, then we do the FULL inflate, CRC check here.\n\t\t\te = 0;\n\n\t\t\t// First, we want to get the inflate CODE byte (the first one).\n\n\t\t\tC = PKZ_MULT(*b++,key2);\n\t\t\tSigChecked = 0;\n\t\t\tif (salt->H[cur_hash_idx].compType == 0) {\n\t\t\t\t// handle a stored file.\n\t\t\t\t// We can ONLY deal with these IF we are handling 'magic' testing.\n\n#if USE_PKZIP_MAGIC\n\t\t\t\t// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.\n\t\t\t\tif (salt->H[cur_hash_idx].pSig->max_len) {\n\t\t\t\t\tint len = salt->H[cur_hash_idx].pSig->max_len;\n\t\t\t\t\tif (len > salt->H[cur_hash_idx].datlen-12)\n\t\t\t\t\t\tlen = salt->H[cur_hash_idx].datlen-12;\n\t\t\t\t\tSigChecked = 1;\n\t\t\t\t\tcurDecryBuf[0] = C;\n\t\t\t\t\tfor (; e < len;) {\n\t\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t\t}\n\n\t\t\t\t\tif (salt->H[cur_hash_idx].magic == 255) {\n\t\t\t\t\t\tif (!validate_ascii(&curDecryBuf[5], len-5))\n\t\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig))\n\t\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue;\n\t\t\t}\n#if 1\n\t\t\t// https://github.com/openwall/john/issues/467\n\t\t\t// Ok, if this is a code 3, we are done.\n\t\t\t// Code moved to after the check for stored type. 
(FIXED) This check was INVALID for a stored type file.\n\t\t\tif ((C & 6) == 6)\n\t\t\t\tgoto Failed_Bailout;\n\n\t\t\tif ((C & 6) == 0) {\n\t\t\t\t// Check that checksum2 is 0 or 1. If not, I 'think' we can be done\n\t\t\t\tif (C > 1)\n\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t// now get 4 bytes. This is the length. It is made up of 2 16 bit values.\n\t\t\t\t// these 2 values are checksumed, so it is easy to tell if the data is WRONG.\n\t\t\t\t// correct data is u16_1 == (u16_2^0xFFFF)\n\t\t\t\tcurDecryBuf[0] = C;\n\t\t\t\tfor (e = 0; e <= 4;) {\n\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t}\n\t\t\t\tv1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8);\n\t\t\t\tv2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8);\n\t\t\t\tif (v1 != (v2^0xFFFF))\n\t\t\t\t\tgoto Failed_Bailout;\n#if USE_PKZIP_MAGIC\n\t\t\t\t// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.\n\t\t\t\tif (salt->H[cur_hash_idx].pSig->max_len) {\n\t\t\t\t\tint len = salt->H[cur_hash_idx].pSig->max_len + 5;\n\t\t\t\t\tif (len > salt->H[cur_hash_idx].datlen-12)\n\t\t\t\t\t\tlen = salt->H[cur_hash_idx].datlen-12;\n\t\t\t\t\tSigChecked = 1;\n\t\t\t\t\tfor (; e < len;) {\n\t\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t\t}\n\n\t\t\t\t\tif (salt->H[cur_hash_idx].magic == 255) {\n\t\t\t\t\t\tif (!validate_ascii(&curDecryBuf[5], len-5))\n\t\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig))\n\t\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data)\n\t\t\t\t// We now have the 2 'hard' ones left (fixed table, and variable table)\n\n\t\t\t\tcurDecryBuf[0] = C;\n\n\t\t\t\tif ((C & 6) == 4) { // inflate 'code' 2 (variable table)\n#if (ZIP_DEBUG==2)\n\t\t\t\t\tstatic unsigned count, found;\n\t\t\t\t\t++count;\n\n\t\t\t\t\t// we need 4 bytes, + 2, + 4 at most.\n\t\t\t\t\tfor (; e < 10;) {\n\t\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t\t}\n\t\t\t\t\tif (!check_inflate_CODE2(curDecryBuf))\n\t\t\t\t\t\tgoto Failed_Bailout;\n#if (ZIP_DEBUG==2)\n\t\t\t\t\tfprintf(stderr, \"CODE2 Pass=%s count = %u, found = %u\\n\", saved_key[idx], count, ++found);\n\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tint til;\n#if (ZIP_DEBUG==2)\n\t\t\t\t\tstatic unsigned count, found;\n\t\t\t\t\t++count;\n\n\t\t\t\t\ttil = 36;\n\t\t\t\t\tif (salt->H[cur_hash_idx].datlen-12 < til)\n\t\t\t\t\t\ttil = salt->H[cur_hash_idx].datlen-12;\n\t\t\t\t\tfor (; e < til;) {\n\t\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t\t}\n\t\t\t\t\tif (!check_inflate_CODE1(curDecryBuf, til))\n\t\t\t\t\t\tgoto Failed_Bailout;\n#if (ZIP_DEBUG==2)\n\t\t\t\t\tfprintf(stderr, \"CODE1 Pass=%s count = %u, found = %u\\n\", saved_key[idx], count, 
++found);\n\n\t\t\t\t}\n\t\t\t}\n#if USE_PKZIP_MAGIC\n\t\t\t// Ok, now see if we need to check sigs, or do a FULL inflate/crc check.\n\t\t\tif (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) {\n\t\t\t\tint til = 180;\n\t\t\t\tif (salt->H[cur_hash_idx].datlen-12 < til)\n\t\t\t\t\ttil = salt->H[cur_hash_idx].datlen-12;\n\t\t\t\tfor (; e < til;) {\n\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t}\n\t\t\t\tstrm.zalloc = Z_NULL;\n\t\t\t\tstrm.zfree = Z_NULL;\n\t\t\t\tstrm.opaque = Z_NULL;\n\t\t\t\tstrm.next_in = Z_NULL;\n\t\t\t\tstrm.avail_in = til;\n\n\t\t\t\tret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */\n\t\t\t\tif (ret != Z_OK)\n\t\t\t\t\tperror(\"Error, initializing the libz inflateInit2() system\\n\");\n\n\t\t\t\tstrm.next_in = curDecryBuf;\n\t\t\t\tstrm.avail_out = sizeof(curInfBuf);\n\t\t\t\tstrm.next_out = curInfBuf;\n\n\t\t\t\tret = inflate(&strm, Z_SYNC_FLUSH);\n\n\t\t\t\tinflateEnd(&strm);\n\t\t\t\tif (ret != Z_OK) {\n\t\t\t\t\t// we need to handle zips smaller than sizeof curInfBuf. If we find a zip of this\n\t\t\t\t\t// size, the return is Z_STREAM_END, BUT things are fine.\n\t\t\t\t\tif (ret == Z_STREAM_END && salt->deCompLen == strm.total_out)\n\t\t\t\t\t\t; // things are ok.\n\t\t\t\t\telse\n\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t}\n\t\t\t\tif (!strm.total_out)\n\t\t\t\t\tgoto Failed_Bailout;\n\n\t\t\t\tret = salt->H[cur_hash_idx].pSig->max_len;\n\t\t\t\tif (salt->H[cur_hash_idx].magic == 255) {\n\t\t\t\t\tif (!validate_ascii(curInfBuf, strm.total_out))\n\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t} else {\n\t\t\t\t\tif (strm.total_out < ret)\n\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\tif (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig))\n\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\tif (salt->H[cur_hash_idx].full_zip) {\n\t\t\t\tu8 inflateBufTmp[1024];\n\t\t\t\tif (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) {\n\t\t\t\t\tfor (;e < 200;) {\n\t\t\t\t\t\tkey0.u = jtr_crc32 (key0.u, curDecryBuf[e]);\n\t\t\t\t\t\tkey1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;\n\t\t\t\t\t\tkey2.u = jtr_crc32 (key2.u, key1.c[KB2]);\n\t\t\t\t\t\tcurDecryBuf[++e] = PKZ_MULT(*b++,key2);\n\t\t\t\t\t}\n\t\t\t\t\tstrm.zalloc = Z_NULL;\n\t\t\t\t\tstrm.zfree = Z_NULL;\n\t\t\t\t\tstrm.opaque = Z_NULL;\n\t\t\t\t\tstrm.next_in = Z_NULL;\n\t\t\t\t\tstrm.avail_in = e;\n\n\t\t\t\t\tret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */\n\t\t\t\t\tif (ret != Z_OK)\n\t\t\t\t\t\tperror(\"Error, initializing the libz inflateInit2() system\\n\");\n\n\t\t\t\t\tstrm.next_in = curDecryBuf;\n\t\t\t\t\tstrm.avail_out = sizeof(inflateBufTmp);\n\t\t\t\t\tstrm.next_out = inflateBufTmp;\n\n\t\t\t\t\tret = inflate(&strm, Z_SYNC_FLUSH);\n\t\t\t\t\tinflateEnd(&strm);\n\n\t\t\t\t\tif (ret != Z_OK) {\n#if (ZIP_DEBUG==2)\n\t\t\t\t\t\tfprintf(stderr, \"fail=%d fail2=%d tot=\"LLd\"\\n\", ++FAILED, FAILED2, ((long long)CNT)*_count);\n\n\t\t\t\t\t\tgoto Failed_Bailout;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgoto KnownSuccess;\n\t\t\t}\n\t\t}\n\t\twhile(--cur_hash_count);\n\n\t\t/* We got a checksum HIT!!!! All hash checksums matched. 
*/\n\t\t/* We load the proper checksum value for the gethash */\n\tKnownSuccess: ;\n\t\tchk[idx] = 1;\n\n\t\tcontinue;\n\n\tFailed_Bailout: ;\n\t\t/* We load the wrong checksum value for the gethash */\n\t\tchk[idx] = 0;\n\t} #pragma omp parallel for private(idx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/drupal7_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index+=MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tunsigned char _IBuf[128*MIN_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;\n\t\tuint64_t *keys64;\n\t\tunsigned i, j, len;\n\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);\n\t\tkeys64 = (uint64_t*)keys;\n\t\tmemset(keys, 0, 128*MIN_KEYS_PER_CRYPT);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen = EncKeyLen[index+i];\n\t\t\tfor (j = 0; j < 8; ++j)\n\t\t\t\tkeys[GETPOS(j, i)] = cursalt[j];\n\t\t\tfor (j = 0; j < len; ++j)\n\t\t\t\tkeys[GETPOS(j+8, i)] = EncKey[index+i][j];\n\t\t\tkeys[GETPOS(j+8, i)] = 0x80;\n\t\t\tkeys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3;\n\t\t}\n\t\tSIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen = EncKeyLen[index+i];\n\t\t\tfor (j = 0; j < len; ++j)\n\t\t\t\tkeys[GETPOS(j+64, i)] = EncKey[index+i][j];\n\t\t\tkeys[GETPOS(j+64, i)] = 0x80;\n\t\t\tkeys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3;\n\t\t}\n\n\t\tuint64_t Lcount = loopCnt - 1;\n\t\tSIMDSHA512body(keys, keys64, &Lcount, SSEi_MIXED_IN|SSEi_LOOP|SSEi_OUTPUT_AS_INP_FMT);\n\n\t\t// Last one with FLAT_OUT\n\t\tSIMDSHA512body(keys, (uint64_t*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);\n#else\n\t\tSHA512_CTX ctx;\n\t\tunsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH];\n\t\tint len = EncKeyLen[index];\n\t\tunsigned Lcount = loopCnt - 1;\n\n\t\tSHA512_Init( &ctx );\n\t\tSHA512_Update( &ctx, cursalt, 8 );\n\t\tSHA512_Update( &ctx, EncKey[index], len );\n\t\tmemcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len);\n\t\tSHA512_Final( tmp, &ctx);\n\t\tlen += DIGEST_SIZE;\n\n\t\tdo {\n\t\t\tSHA512_Init( &ctx );\n\t\t\tSHA512_Update( &ctx, tmp, len);\n\t\t\tSHA512_Final( tmp, &ctx);\n\t\t} while (--Lcount);\n\t\tSHA512_Init( &ctx );\n\t\tSHA512_Update( &ctx, tmp, len);\n\t\tSHA512_Final( (unsigned char *) crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawmd5u_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "OCK_LOOPS),\n\t// cause it does not scale well. 
We would need to parallelize set_key()\n#ifdef _OPENMP\nfor (i = 0; i < BLOCK_LOOPS; i++)\n\t\tSIMDmd5body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE], NULL, SSEi_MIXED_IN);\n#else\n\tSIMDmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN);\n\n#else\n\tMD5_Init( &ctx );\n\tMD5_Update(&ctx, (unsigned char*)saved_key, saved_len);\n\tMD5_Final((unsigned char*) crypt_key, &ctx);\n\n\treturn count;\n}\n\n#define COMMON_GET_HASH_SIMD32 4\n#define COMMON_GET_HASH_VAR crypt_key\n#include \"common-get-hash.h\"\n\nstruct fmt_main fmt_rawmd5uthick = {\n\t{\n\t\tFORMAT_LABEL,\n\t\tFORMAT_NAME,\n\t\tALGORITHM_NAME,\n\t\tBENCHMARK_COMMENT,\n\t\tBENCHMARK_LENGTH,\n\t\t0,\n\t\tPLAINTEXT_LENGTH,\n\t\tBINARY_SIZE,\n\t\tBINARY_ALIGN,\n\t\tSALT_SIZE,\n\t\tSALT_ALIGN,\n\t\tMIN_KEYS_PER_CRYPT,\n\t\tMAX_KEYS_PER_CRYPT,\n#if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA)\n\t\tFMT_OMP |\n\n\t\tFMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_ENC | FMT_SPLIT_UNIFIES_CASE,\n\t\t{ NULL },\n\t\t{ NULL },\n\t\ttests\n\t}, {\n\t\tinit,\n\t\tdone,\n\t\tfmt_default_reset,\n\t\tfmt_default_prepare,\n\t\tvalid,\n\t\tsplit,\n\t\tget_binary,\n\t\tfmt_default_salt,\n\t\t{ NULL },\n\t\tfmt_default_source,\n\t\t{\n\t\t\tfmt_default_binary_hash_0,\n\t\t\tfmt_default_binary_hash_1,\n\t\t\tfmt_default_binary_hash_2,\n\t\t\tfmt_default_binary_hash_3,\n\t\t\tfmt_default_binary_hash_4,\n\t\t\tfmt_default_binary_hash_5,\n\t\t\tfmt_default_binary_hash_6\n\t\t},\n\t\tfmt_default_salt_hash,\n\t\tNULL,\n\t\tfmt_default_set_salt,\n\t\tset_key,\n\t\tget_key,\n\t\tfmt_default_clear_keys,\n\t\tcrypt_all,\n\t\t{\n#define COMMON_GET_HASH_LINK\n#include \"common-get-hash.h\"\n\t\t},\n\t\tcmp_all,\n\t\tcmp_one,\n\t\tcmp_exact\n\t}\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/skein_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_256(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_skein256_context ctx;\n\n\t\tsph_skein256_init(&ctx);\n\t\tsph_skein256(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_skein256_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/skein_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_512(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_skein512_context ctx;\n\n\t\tsph_skein512_init(&ctx);\n\t\tsph_skein512(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_skein512_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bks_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ny_cracked) {\n\t\tmemset(cracked, 0, sizeof(*cracked) * count);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#if !defined(SIMD_COEF_32)\n\t\tif (cur_salt->format == 0) {\n\t\t\tunsigned char mackey[20];\n\t\t\tint mackeylen = cur_salt->hmac_key_size / 8;\n\t\t\t// mackeylen is only 2 bytes, and this results in lot\n\t\t\t// of collisions (which work just fine)\n\t\t\t//\n\t\t\t// FMT_NOT_EXACT can be 
turned on for BKS keystores\n\t\t\t// for finding more possible passwords\n\t\t\tunsigned char store_hmac_calculated[20];\n\n\t\t\tpkcs12_pbe_derive_key(1, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha1(mackey, mackeylen, cur_salt->store_data,\n\t\t\t\t\tcur_salt->store_data_length,\n\t\t\t\t\tstore_hmac_calculated, 20);\n\n\t\t\tif (!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20))\n\t\t\t{\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t} else if (cur_salt->format == 1) {\n\t\t\tunsigned char compute_checkum[20];\n\t\t\tunsigned char iv[16];\n\t\t\tunsigned char key[32];\n\t\t\tTwofish_key tkey;\n\t\t\tint datalen = 0;\n\t\t\tunsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH];\n\t\t\tSHA_CTX ctx;\n\n\t\t\tpkcs12_pbe_derive_key(1, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_IV,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, iv, 16);\n\t\t\tpkcs12_pbe_derive_key(1, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, key, 32);\n\t\t\tTwofish_prepare_key(key, 32, &tkey);\n\t\t\tdatalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv);\n\t\t\tif (datalen < 0)\n\t\t\t\tcontinue;\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, store_data_decrypted, datalen - 20);\n\t\t\tSHA1_Final(compute_checkum, &ctx);\n\n\t\t\tif (!memcmp(compute_checkum, store_data_decrypted + datalen - 20, 20))\n\t\t\t{\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n#else\n\t\tsize_t lens[SSE_GROUP_SZ_SHA1], j;\n\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA1];\n\t\t// Load keys, and lengths\n\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\tlens[j] = saved_len[index+j];\n\t\t\tkeys[j] = (const unsigned char*)(saved_key[index+j]);\n\t\t}\n\n\t\tif (cur_salt->format == 0) {\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA1], real_keys[SSE_GROUP_SZ_SHA1][20];\n\t\t\tint mackeylen = cur_salt->hmac_key_size / 8;\n\t\t\t// mackeylen is only 2 bytes, and this results in lot\n\t\t\t// of collisions (which work just fine)\n\t\t\t//\n\t\t\t// FMT_NOT_EXACT can be turned on for BKS keystores\n\t\t\t// for finding more possible passwords\n\t\t\tunsigned char store_hmac_calculated[20];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j)\n\t\t\t\tmackey[j] = real_keys[j];\n\t\t\tpkcs12_pbe_derive_key_simd_sha1(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\tkeys, lens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\t\thmac_sha1(mackey[j], mackeylen, cur_salt->store_data,\n\t\t\t\t\t\tcur_salt->store_data_length,\n\t\t\t\t\t\tstore_hmac_calculated, 20);\n\n\t\t\t\tif (!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20))\n\t\t\t\t{\n\t\t\t\t\tcracked[index+j] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (cur_salt->format == 1) {\n\t\t\tunsigned char iv_[SSE_GROUP_SZ_SHA1][16], *iv[SSE_GROUP_SZ_SHA1];\n\t\t\tunsigned char ckey_[SSE_GROUP_SZ_SHA1][32], 
*ckey[SSE_GROUP_SZ_SHA1];\n\t\t\tTwofish_key tkey;\n\t\t\tint datalen = 0;\n\t\t\tunsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH];\n\t\t\tSHA_CTX ctx;\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\t\tiv[j] = iv_[j];\n\t\t\t\tckey[j] = ckey_[j];\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha1(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_IV,\n\t\t\t\t\tkeys,\n\t\t\t\t\tlens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, iv, 16);\n\t\t\t// lengths get tromped on, so re-load them for the load keys call.\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j)\n\t\t\t\tlens[j] = saved_len[index+j];\n\t\t\tpkcs12_pbe_derive_key_simd_sha1(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t\tkeys,\n\t\t\t\t\tlens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, ckey, 32);\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\t\tunsigned char compute_checkum[20];\n\t\t\t\tTwofish_prepare_key(ckey[j], 32, &tkey);\n\t\t\t\tdatalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv[j]);\n\t\t\t\tif (datalen < 0)\n\t\t\t\t\tcontinue;\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, store_data_decrypted, datalen - 20);\n\t\t\t\tSHA1_Final(compute_checkum, &ctx);\n\n\t\t\t\tif (!memcmp(compute_checkum, store_data_decrypted + datalen - 20, 20))\n\t\t\t\t{\n\t\t\t\t\tcracked[index+j] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\tany_cracked |= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5_db_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "l(int *pcount, struct db_salt *_salt)\n{\n\tconst int count = *pcount;\n\tint index = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char key[32], i;\n\t\tAES_KEY aeskey;\n\t\tint key_size;\n\n\t\tif (cur_salt->etype == 18 || cur_salt->etype == 17) {\n#ifdef SSE_GROUP_SZ_SHA1\n\t\t\tuint32_t Key[SSE_GROUP_SZ_SHA1][32/4];\n\t\t\tint lens[SSE_GROUP_SZ_SHA1];\n\t\t\tunsigned char *pin[SSE_GROUP_SZ_SHA1];\n\t\t\tunion {\n\t\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA1];\n\t\t\t\tunsigned char *poutc;\n\t\t\t} x;\n\t\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\t\tx.pout[i] = Key[i];\n\t\t\t}\n\t\t\tif (cur_salt->etype == 18) {\n\t\t\t\tkey_size = 32;\n\t\t\t} else {\n\t\t\t\tkey_size = 16;\n\t\t\t}\n\t\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)cur_salt->saved_salt, strlen(cur_salt->saved_salt), 4096, &(x.poutc), key_size, 0);\n#else\n\t\t\tif (cur_salt->etype == 18) {\n\t\t\t\tkey_size = 32;\n\t\t\t} else {\n\t\t\t\tkey_size = 16;\n\t\t\t}\n\t\t\tpbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)cur_salt->saved_salt, strlen(cur_salt->saved_salt), 4096, key, key_size, 0);\n\n\t\t\ti = 0;\n#ifdef SSE_GROUP_SZ_SHA1\n\t\t\tfor (; i < SSE_GROUP_SZ_SHA1; ++i) {\n\t\t\t\tmemcpy(key, Key[i], key_size);\n\n\t\t\t\tAES_set_encrypt_key(key, key_size * 8, &aeskey);\n\t\t\t\tAES_encrypt((unsigned char*)\"kerberos{\\x9b[+\\x93\\x13+\\x93\", (unsigned char*)(crypt_out[index+i]), &aeskey); // the weird constant string comes from \"nfold\" function\n\t\t\t\tAES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey);\n#ifdef SSE_GROUP_SZ_SHA1\n\t\t\t}\n\n\t\t} 
else if (cur_salt->etype == 3) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tdes_string_to_key_shishi(saved_key[index+i], strlen(saved_key[index+i]), cur_salt->saved_salt, strlen(cur_salt->saved_salt), (unsigned char*)(crypt_out[index+i]));\n\t\t\t}\n\t\t}\n\t}\n\n\treturn count;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pem_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, SALTLEN, cur_salt->iterations, pout, cur_salt->key_length, 0);\n#else\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, SALTLEN, cur_salt->iterations, master[0], cur_salt->key_length, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (pem_decrypt(master[i], cur_salt->iv, cur_salt->ciphertext, cur_salt) == 0)\n\t\t\t\tcracked[index+i] = 1;\n\t\t\telse\n\t\t\t\tcracked[index+i] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/leet_cc_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += NBKEYS) {\n\t\tsph_whirlpool_context wctx;\n\t\tint i;\n\t\tunion {\n\t\t\tunsigned char buf[BINARY_SIZE];\n\t\t\tuint64_t p64[1];\n\t\t} output1[NBKEYS], output2;\n#ifdef SIMD_COEF_64\n\t\t// Not sure why JTR_ALIGN(MEM_ALIGN_SIMD) does n ot work here\n\t\t// but if used, it cores travis-ci, so we use mem_align instead\n\t\tunsigned char _in[8*16*MIN_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];\n\t\tunsigned char _out[8*8*MIN_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];\n\t\tuint64_t *in = (uint64_t*)mem_align(_in, MEM_ALIGN_SIMD);\n\t\tuint64_t *out = (uint64_t*)mem_align(_out, MEM_ALIGN_SIMD);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tint x80_off = saved_len[index+i]+cur_salt->saltlen;\n\t\t\tunsigned char *cp = (unsigned char*)&(in[16*i]);\n\t\t\tmemcpy(cp, saved_key[index+i], saved_len[index+i]);\n\t\t\tmemcpy(&cp[saved_len[index+i]], cur_salt->salt, cur_salt->saltlen);\n\t\t\tcp[x80_off] = 0x80;\n\t\t\tmemset(&cp[x80_off+1], 0, 120-(x80_off+1));\n\t\t\tin[i*16+15] = x80_off<<3;\n\t\t}\n\t\tSIMDSHA512body(in, out, NULL, SSEi_FLAT_IN);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\toutput1[i].p64[0] = JOHNSWAP64(out[((i/SIMD_COEF_64)*8*SIMD_COEF_64+i%SIMD_COEF_64)]);\n#else\n\t\t\toutput1[i].p64[0] = out[((i/SIMD_COEF_64)*8*SIMD_COEF_64+i%SIMD_COEF_64)];\n\n\t\t}\n#else\n\t\tSHA512_CTX sctx;\n\n\t\tSHA512_Init(&sctx);\n\t\tSHA512_Update(&sctx, saved_key[index], saved_len[index]);\n\t\tSHA512_Update(&sctx, cur_salt->salt, cur_salt->saltlen);\n\t\tSHA512_Final(output1[0].buf, &sctx);\n\n\t\tfor (i = 0; i < NBKEYS; ++i) 
{\n\t\t\tsph_whirlpool_init(&wctx);\n\t\t\tsph_whirlpool(&wctx, cur_salt->salt, cur_salt->saltlen);\n\t\t\tsph_whirlpool(&wctx, saved_key[index+i], saved_len[index+i]);\n\t\t\tsph_whirlpool_close(&wctx, output2.buf);\n\t\t\tcrypt_out[index+i][0] = output1[i].p64[0] ^ output2.p64[0];\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/argon2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " crypt_all(int *pcount, struct db_salt *salt)\n{\n\tint i;\n\tconst int count = *pcount;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\targon2_hash(saved_salt.t_cost, saved_salt.m_cost, saved_salt.lanes, saved_key[i], saved_len[i], saved_salt.salt,\n\t\t saved_salt.salt_length, crypted[i], saved_salt.hash_size, 0, 0, saved_salt.type, ARGON2_VERSION_NUMBER, memory[THREAD_NUMBER%sc_threads].aligned, pseudo_rands[THREAD_NUMBER%sc_threads]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pfx_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "->mac_algo == 512 || cur_salt->mac_algo == 384)\n\t\tinc = SSE_GROUP_SZ_SHA512;\n#endif\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += inc) {\n#if !defined(SIMD_COEF_32)\n\t\tif (cur_salt->mac_algo == 1) {\n\t\t\tunsigned char mackey[20];\n\t\t\tint mackeylen = cur_salt->key_length;\n\n\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha1(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t(unsigned char*)crypt_out[index],\n\t\t\t\t\tBINARY_SIZE);\n\t\t} else if (cur_salt->mac_algo == 256) {\n\t\t\tunsigned char mackey[32];\n\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha256(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t(unsigned char*)crypt_out[index],\n\t\t\t\t\tBINARY_SIZE);\n\t\t} else if (cur_salt->mac_algo == 512) {\n\t\t\tunsigned char mackey[64];\n\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha512(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t(unsigned char*)crypt_out[index],\n\t\t\t\t\tBINARY_SIZE);\n\t\t} else if (cur_salt->mac_algo == 224) {\n\t\t\tunsigned char mackey[32];\n\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha224(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t(unsigned char*)crypt_out[index],\n\t\t\t\t\tBINARY_SIZE);\n\t\t} else if (cur_salt->mac_algo == 
384) {\n\t\t\tunsigned char mackey[64];\n\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\thmac_sha384(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t(unsigned char*)crypt_out[index],\n\t\t\t\t\tBINARY_SIZE);\n\t\t}\n\n#else\n\t\tif (cur_salt->mac_algo == 1) {\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA1], real_keys[SSE_GROUP_SZ_SHA1][20];\n\t\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA1];\n\t\t\tint mackeylen = cur_salt->key_length, j;\n\t\t\tsize_t lens[SSE_GROUP_SZ_SHA1];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\t\tmackey[j] = real_keys[j];\n\t\t\t\tlens[j] = saved_len[index+j];\n\t\t\t\tkeys[j] = (const unsigned char*)(saved_key[index+j]);\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha1(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY, keys,\n\t\t\t\t\tlens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {\n\t\t\t\thmac_sha1(mackey[j], mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n\t\t} else if (cur_salt->mac_algo == 256) {\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA256], real_keys[SSE_GROUP_SZ_SHA256][32];\n\t\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA256];\n\t\t\tint mackeylen = cur_salt->key_length, j;\n\t\t\tsize_t lens[SSE_GROUP_SZ_SHA256];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {\n\t\t\t\tmackey[j] = real_keys[j];\n\t\t\t\tlens[j] = saved_len[index+j];\n\t\t\t\tkeys[j] = (const unsigned char*)(saved_key[index+j]);\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha256(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY, keys,\n\t\t\t\t\tlens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {\n\t\t\t\thmac_sha256(mackey[j], mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n\t\t} else if (cur_salt->mac_algo == 512) {\n#if defined(SIMD_COEF_64)\n\t\t\tunsigned char *mackey[SSE_GROUP_SZ_SHA512], real_keys[SSE_GROUP_SZ_SHA512][64];\n\t\t\tconst unsigned char *keys[SSE_GROUP_SZ_SHA512];\n\t\t\tint mackeylen = cur_salt->key_length, j;\n\t\t\tsize_t lens[SSE_GROUP_SZ_SHA512];\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {\n\t\t\t\tmackey[j] = real_keys[j];\n\t\t\t\tlens[j] = saved_len[index+j];\n\t\t\t\tkeys[j] = (const unsigned char*)(saved_key[index+j]);\n\t\t\t}\n\t\t\tpkcs12_pbe_derive_key_simd_sha512(cur_salt->iteration_count,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY, keys,\n\t\t\t\t\tlens, cur_salt->salt,\n\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {\n\t\t\t\thmac_sha512(mackey[j], mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n#else\n\t\t\tint j;\n\n\t\t\tfor (j = 0; j < inc; ++j) {\n\t\t\t\tunsigned char mackey[64];\n\t\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\t\tpkcs12_pbe_derive_key(512, cur_salt->iteration_count,\n\t\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t\t(unsigned 
char*)saved_key[index+j],\n\t\t\t\t\t\tsaved_len[index+j], cur_salt->salt,\n\t\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\t\thmac_sha512(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n\n\t\t} else if (cur_salt->mac_algo == 224) {\n\t\t\tint j;\n\n\t\t\tfor (j = 0; j < inc; ++j) {\n\t\t\t\tunsigned char mackey[32];\n\t\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t\t(unsigned char*)saved_key[index+j],\n\t\t\t\t\t\tsaved_len[index+j], cur_salt->salt,\n\t\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\t\thmac_sha224(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n\t\t} else if (cur_salt->mac_algo == 384) {\n\t\t\tint j;\n\n\t\t\tfor (j = 0; j < inc; ++j) {\n\t\t\t\tunsigned char mackey[64];\n\t\t\t\tint mackeylen = cur_salt->key_length;\n\t\t\t\tpkcs12_pbe_derive_key(cur_salt->mac_algo, cur_salt->iteration_count,\n\t\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_MAC_KEY,\n\t\t\t\t\t\t(unsigned char*)saved_key[index+j],\n\t\t\t\t\t\tsaved_len[index+j], cur_salt->salt,\n\t\t\t\t\t\tcur_salt->saltlen, mackey, mackeylen);\n\n\t\t\t\thmac_sha384(mackey, mackeylen, cur_salt->data,\n\t\t\t\t\t\tcur_salt->data_length,\n\t\t\t\t\t\t(unsigned char*)crypt_out[index+j],\n\t\t\t\t\t\tBINARY_SIZE);\n\t\t\t}\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/armory_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, failed, cracked, salt, max_threads, memory, saved_key, saved_salt, crypt_out)", "context_chars": 100, "text": ", struct db_salt *salt)\n{\n\tint failed = 0, cracked = !salt, count = *pcount, index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\n\t\terrno = 0;\n\t\tderived_key dk[MIN_KEYS_PER_CRYPT];\n\t\tif (derive_keys(&memory[t], index, dk)) {\n\t\t\tfailed = errno ? 
errno : ENOMEM;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\n\t\tint subindex;\n\t\tfor (subindex = 0; subindex < MIN_KEYS_PER_CRYPT; subindex++) {\n\t\t\tderive_address(memory, &dk[subindex], crypt_out[index + subindex]);\n\n\t\t\tif (salt) {\n\t\t\t\tstruct db_password *pw = salt->list;\n\t\t\t\tdo {\n\t\t\t\t\tif (!memcmp(pw->binary, crypt_out[index + subindex], BINARY_SIZE))\n\t\t\t\t\t\tcracked = -1;\n\t\t\t\t} while ((pw = pw->next));\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for default(none) private(index) shared(count, failed, cracked, salt, max_threads, memory, saved_key, saved_salt, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bestcrypt_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ount = *pcount;\n\tint index;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n\t\tif (cur_salt->hash_id == bchaWhirlpool512) {\n\t\t\tunsigned char key[kBCPasswordMaximumKeySize];\n\t\t\tint keylen = 0;\n\t\t\tpbe_format_v5_64* pbe64;\n\t\t\tunsigned char out[256] = {0};\n\t\t\tAES_KEY aes_key;\n\t\t\tsph_whirlpool_context ctx;\n\t\t\tunsigned char hash[64];\n\t\t\tunsigned char iv[16] = {0};\n\t\t\tstruct KGEncryptedBlock64 *p;\n\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS)\n\t\t\t\tkeylen = 64; // for AES-256 XTS mode\n\t\t\telse if (cur_salt->mode_id == kBCMode_CBC)\n\t\t\t\tkeylen = 32;\n\t\t\tpkcs12_pbe_derive_key(2, cur_salt->iterations, // 2 is a hack to indicate Whirlpool-512\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY, // key material\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->salt_size, key, keylen);\n\t\t\tpbe64 = (pbe_format_v5_64*)cur_salt->key;\n\t\t\tmemcpy(iv, pbe64->iv, 8);\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS) {\n\t\t\t\tAES_XTS_decrypt_custom_tweak(key, iv, out, pbe64->keyblock, 256, 256);\n\t\t\t}\n\t\t\tif (cur_salt->mode_id == kBCMode_CBC) {\n\t\t\t\t// decrypt data stored in encrypted block, AES CBC mode\n\t\t\t\tmemcpy(iv + 8, pbe64->iv, 8); // isn't BestCrypt great?\n\t\t\t\tAES_set_decrypt_key(key, 256, &aes_key);\n\t\t\t\tAES_cbc_encrypt(pbe64->keyblock, out, 160, &aes_key, iv, AES_DECRYPT);\n\t\t\t}\n\t\t\tsph_whirlpool_init(&ctx);\n\t\t\tsph_whirlpool(&ctx, out, 90); // only 90 bytes are used, calculate_digest(hash, data, sizeof(*data), digest), sizeof(*data) == 90\n\t\t\tsph_whirlpool_close(&ctx, hash);\n\t\t\tp = (struct KGEncryptedBlock64 *)out;\n\t\t\tcracked[index] = (0 == memcmp(hash, p->digest, kDigestSize32));\n\t\t} else if (cur_salt->hash_id == bchaSHA256) {\n\t\t\tunsigned char key[kBCPasswordMaximumKeySize];\n\t\t\tint keylen = 0;\n\t\t\tpbe_format_v5_32* pbe32;\n\t\t\tunsigned char out[256] = {0};\n\t\t\tAES_KEY aes_key;\n\t\t\tSHA256_CTX ctx;\n\t\t\tunsigned char hash[32];\n\t\t\tunsigned char iv[16] = {0};\n\t\t\tstruct KGEncryptedBlock32 *p;\n\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS)\n\t\t\t\tkeylen = 64;\n\t\t\telse if (cur_salt->mode_id == kBCMode_CBC)\n\t\t\t\tkeylen = 32;\n\t\t\tpkcs12_pbe_derive_key(256, cur_salt->iterations,\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->salt_size, key, keylen);\n\t\t\tpbe32 = (pbe_format_v5_32*)cur_salt->key;\n\t\t\tmemcpy(iv, pbe32->iv, 8); // iv[8:16] is all zero for XTS mode\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS) 
{\n\t\t\t\tAES_XTS_decrypt_custom_tweak(key, iv, out, pbe32->keyblock, 256, 256);\n\t\t\t} else if (cur_salt->mode_id == kBCMode_CBC) {\n\t\t\t\tmemcpy(iv + 8, pbe32->iv, 8); // iv[8:16] is repeat of iv[0:8] for CBC mode\n\t\t\t\tAES_set_decrypt_key(key, 256, &aes_key);\n\t\t\t\tAES_cbc_encrypt(pbe32->keyblock, out, 128, &aes_key, iv, AES_DECRYPT);\n\t\t\t}\n\t\t\tSHA256_Init(&ctx);\n\t\t\tSHA256_Update(&ctx, out, 90);\n\t\t\tSHA256_Final(hash, &ctx);\n\t\t\tp = (struct KGEncryptedBlock32 *)out;\n\t\t\tcracked[index] = (0 == memcmp(hash, p->digest, kDigestSize32));\n\t\t} else if (cur_salt->hash_id == pgphaSHA512) {\n\t\t\tunsigned char key[kBCPasswordMaximumKeySize];\n\t\t\tint keylen = 0;\n\t\t\tpbe_format_v5_64* pbe64;\n\t\t\tunsigned char out[256] = {0};\n\t\t\tAES_KEY aes_key;\n\t\t\tSHA512_CTX ctx;\n\t\t\tunsigned char hash[64];\n\t\t\tunsigned char iv[16] = {0};\n\t\t\tstruct KGEncryptedBlock64 *p;\n\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS)\n\t\t\t\tkeylen = 64;\n\t\t\telse if (cur_salt->mode_id == kBCMode_CBC)\n\t\t\t\tkeylen = 32;\n\t\t\tpkcs12_pbe_derive_key(10, cur_salt->iterations, // 10 is a hack to indicate BestCrypt specific PKCS12 PBE with SHA-512\n\t\t\t\t\tMBEDTLS_PKCS12_DERIVE_KEY,\n\t\t\t\t\t(unsigned char*)saved_key[index],\n\t\t\t\t\tsaved_len[index], cur_salt->salt,\n\t\t\t\t\tcur_salt->salt_size, key, keylen);\n\t\t\tpbe64 = (pbe_format_v5_64*)cur_salt->key;\n\t\t\tmemcpy(iv, pbe64->iv, 8);\n\t\t\tif (cur_salt->mode_id == kBCMode_XTS) {\n\t\t\t\tAES_XTS_decrypt_custom_tweak(key, iv, out, pbe64->keyblock, 256, 256);\n\t\t\t} else if (cur_salt->mode_id == kBCMode_CBC) {\n\t\t\t\tmemcpy(iv + 8, pbe64->iv, 8);\n\t\t\t\tAES_set_decrypt_key(key, 256, &aes_key);\n\t\t\t\tAES_cbc_encrypt(pbe64->keyblock, out, 160, &aes_key, iv, AES_DECRYPT);\n\t\t\t}\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, out, 90);\n\t\t\tSHA512_Final(hash, &ctx);\n\t\t\tp = (struct KGEncryptedBlock64 *)out;\n\t\t\tcracked[index] = (0 == memcmp(hash, p->digest, kDigestSize32));\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ssh_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char out[N];\n\n\t\t// don't do full decryption (except for EC keys)\n\t\tcommon_crypt_code(saved_key[index], out, 0);\n\n\t\tif (cur_salt->cipher == 0) { // 3DES\n\t\t\tcracked[index] =\n\t\t\t\t!check_padding_and_structure(out, cur_salt->ctl, 0, 8);\n\t\t} else if (cur_salt->cipher == 1) {\n\t\t\tcracked[index] =\n\t\t\t\t!check_padding_and_structure(out, cur_salt->ctl, 0, 16);\n\t\t} else if (cur_salt->cipher == 2 || cur_salt->cipher == 6) { // new ssh key format handling\n\t\t\tcracked[index] =\n\t\t\t\t!check_structure_bcrypt(out, cur_salt->ctl);\n\t\t} else if (cur_salt->cipher == 3) { // EC keys\n\t\t\tcracked[index] =\n\t\t\t\t!check_padding_and_structure_EC(out, cur_salt->ctl, 0);\n\t\t} else if (cur_salt->cipher == 4) { // AES-192\n\t\t\tcracked[index] =\n\t\t\t\t!check_padding_and_structure(out, cur_salt->ctl, 0, 16);\n\t\t} else if (cur_salt->cipher == 5) { // AES-256\n\t\t\tcracked[index] =\n\t\t\t\t!check_padding_and_structure(out, cur_salt->ctl, 0, 16);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/blackberry_ES10_fmt_plug.c", 
"omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tint j;\n\t\tSHA512_CTX ctx;\n#ifdef SIMD_COEF_64\n/* We use SSEi_HALF_IN, so can halve SHA_BUF_SIZ */\n#undef SHA_BUF_SIZ\n#define SHA_BUF_SIZ 8\n\t\tunsigned int i;\n\t\tunsigned char _IBuf[8*SHA_BUF_SIZ*MIN_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;\n\t\tuint64_t *keys64, tmpBuf64[SHA_BUF_SIZ], *p64;\n\t\tkeys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);\n\t\tkeys64 = (uint64_t*)keys;\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\tSHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt));\n\t\t\tSHA512_Final((unsigned char *)tmpBuf64, &ctx);\n\t\t\tp64 = &keys64[i%SIMD_COEF_64+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];\n\t\t\tfor (j = 0; j < 8; ++j)\n#if ARCH_LITTLE_ENDIAN==1\n\t\t\t\tp64[j*SIMD_COEF_64] = JOHNSWAP64(tmpBuf64[j]);\n#else\n\t\t\t\tp64[j*SIMD_COEF_64] = tmpBuf64[j];\n\n\t\t}\n\t\tuint64_t rounds = 98;\n\t\tSIMDSHA512body(keys, keys64, &rounds, SSEi_HALF_IN|SSEi_LOOP);\n\t\tSIMDSHA512body(keys, (uint64_t*)crypt_out[index], NULL, SSEi_HALF_IN|SSEi_FLAT_OUT);\n#else\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt));\n\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t\t/* now \"h\" (crypt_out[index] becomes our input\n\t\t * total SHA-512 calls => 101 */\n\t\tfor (j = 0; j < 99; j++) {\n\t\t\tSHA512_CTX ctx;\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, (unsigned char*)crypt_out[index], 64);\n\t\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dragonfly3_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA256_CTX ctx;\n\n\t\tSHA256_Init(&ctx);\n\n\t\t/* First the password */\n\t\tSHA256_Update(&ctx, saved_key[index], saved_len[index]);\n\n\t\t/* Then the salt, including the $3$ magic */\n\t\tSHA256_Update(&ctx, cur_salt, salt_len);\n\n\t\tSHA256_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/tiger_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_tiger_context ctx;\n\n\t\tsph_tiger_init(&ctx);\n\t\tsph_tiger(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_tiger_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_gpg_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "filingEvent[2]),\n\t\t\"Copy result back\");\n\n\tif (ocl_autotune_running)\n\t\treturn count;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif 
(gpg_common_check(outbuffer[index].v, gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm))) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mssql12_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ll(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tif (new_keys) {\n\t\t\tint i;\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tunsigned char *wucp = (unsigned char *)&saved_key[index + i];\n\t\t\t\tint j, len = (saved_key[index + i].u64[15] >> 3) - SALT_SIZE;\n\n\t\t\t\tfor (j = 0; j < SALT_SIZE; j++)\n\t\t\t\t\twucp[len + j] = cursalt[j];\n\n\t\t\t\twucp[len + 4] = 0x80;\n\t\t\t}\n\t\t}\n\t\tSIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL, SSEi_REVERSE_STEPS | SSEi_FLAT_IN);\n#else\n\t\tSHA512_CTX ctx;\n\t\tmemcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE);\n\t\tSHA512_Init(&ctx );\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE );\n\t\tSHA512_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dashlane_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0]) * cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char pkey[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = pkey[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 32, 10204, pout, 32, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i],\n\t\t\t\t\tstrlen(saved_key[index+i]),\n\t\t\t\t\tcur_salt->salt, 32, 10204,\n\t\t\t\t\tpkey[i], 32, 0);\n\t\t}\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tif (dashlane_verify(cur_salt, pkey[i]))\n\t\t\t\tcracked[index+i] = 1;\n\t\t\telse\n\t\t\t\tcracked[index+i] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawSHA384_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tSIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64],\n\t\t &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64],\n\t\t NULL, SSEi_REVERSE_STEPS|SSEi_MIXED_IN|SSEi_CRYPT_SHA384);\n#else\n\t\tSHA512_CTX ctx;\n\t\tSHA384_Init(&ctx);\n\t\tSHA384_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA384_Final((unsigned char *)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/phpassMD5_fmt_plug.c", "omp_pragma_line": "#pragma omp 
parallel for", "context_chars": 100, "text": "t loops = 1, index;\n\n\tloops = (count + MIN_KEYS_PER_CRYPT - 1) / MIN_KEYS_PER_CRYPT;\n#ifdef _OPENMP\nfor (index = 0; index < loops; index++) {\n\t\tunsigned Lcount;\n#ifdef SIMD_COEF_32\n\n\t\tSIMDmd5body(cursalt[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);\n\t\tLcount = loopCnt-1;\n\t\tdo {\n\t\t\tSIMDmd5body(hash_key[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);\n\t\t} while (--Lcount);\n\t\t// last hash goes into crypt_key\n\t\tSIMDmd5body(hash_key[index], crypt_key[index], NULL, 0);\n#else\n\t\tMD5_CTX ctx;\n\t\tMD5_Init( &ctx );\n\t\tMD5_Update( &ctx, cursalt, 8 );\n\t\tMD5_Update( &ctx, saved_key[index], saved_len[index] );\n\t\tMD5_Final( (unsigned char *) crypt_key[index], &ctx);\n\n\t\tstrcpy(((char*)&(crypt_key[index]))+BINARY_SIZE, saved_key[index]);\n\t\tLcount = loopCnt;\n\n\t\tdo {\n\t\t\tMD5_Init( &ctx );\n\t\t\tMD5_Update( &ctx, crypt_key[index], BINARY_SIZE+saved_len[index]);\n\t\t\tMD5_Final( (unsigned char *)&(crypt_key[index]), &ctx);\n\t\t} while (--Lcount);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/NETNTLMv2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "hallenge + 1 + identity_length + 1) << 8) | *(challenge + 1 + identity_length + 2);\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tunsigned char ntlm_v2_hash[16];\n\t\tHMACMD5Context ctx;\n\n\t\tif (!keys_prepared) {\n\t\t\tunsigned char ntlm[16];\n\t\t\tint len;\n\n\t\t\t/* Generate 16-byte NTLM hash */\n\t\t\tlen = E_md4hash(saved_plain[i], saved_len[i], ntlm);\n\n\t\t\t// We do key setup of the next HMAC_MD5 here (once per salt)\n\t\t\thmac_md5_init_K16(ntlm, &saved_ctx[i]);\n\n\t\t\tif (len <= 0)\n\t\t\t\tsaved_plain[i][-len] = 0; // match truncation\n\t\t}\n\n\t\t/* HMAC-MD5(Username + Domain, NTLM Hash) */\n\t\tmemcpy(&ctx, &saved_ctx[i], sizeof(ctx));\n\t\thmac_md5_update((unsigned char *)&challenge[1], identity_length, &ctx);\n\t\thmac_md5_final(ntlm_v2_hash, &ctx);\n\n\t\t/* --- Blob Construction --- */\n\n\t\t/*\n\t\t The blob consists of the target (from Type 2 message), client nonce and timestamp.\n\t\t This data was provided by the client during authentication and we can use it as is.\n\t\t*/\n\n\t\t/* --- HMAC #2 Calculations --- */\n\n\t\t/*\n\t\t The (server) challenge from the Type 2 message is concatenated with the blob. The\n\t\t HMAC-MD5 message authentication code algorithm is applied to this value using the\n\t\t 16-byte NTLMv2 hash (calculated above) as the key. This results in a 16-byte output\n\t\t value.\n\t\t*/\n\n\t\t/*\n\t\t Generate 16-byte non-client nonce portion of NTLMv2 Response\n\t\t HMAC-MD5(Challenge + Nonce, NTLMv2 Hash)\n\n\t\t The length of the challenge was set in get_salt(). 
We find the server\n\t\t challenge and blob following the identity and challenge size value.\n\t\t challenge -> Identity length, Identity\0, Size (2 bytes), Server Challenge + Client Challenge (Blob)\n\t\t*/\n\t\thmac_md5(ntlm_v2_hash, challenge + 1 + identity_length + 1 + 2, challenge_size, (unsigned char*)output[i]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/multibit_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\tint failed = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char iv[16];\n\t\tunsigned char key[32];\n\t\tunsigned char outbuf[16];\n\t\tAES_KEY aes_decrypt_key;\n\t\tint len = strlen(saved_key[index]);\n\n#ifdef _OPENMP\n\t\tif (cracked[index]) /* avoid false sharing of nearby elements */\n\n\t\t\tcracked[index] = 0;\n\n\t\tif (cur_salt->type == 1) {\n\t\t\tunsigned char c;\n\t\t\tMD5_CTX ctx;\n\n\t\t\t// key\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, saved_key[index], len);\n\t\t\tMD5_Update(&ctx, cur_salt->salt, 8);\n\t\t\tMD5_Final(key, &ctx);\n\t\t\t// key + 16\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, key, 16);\n\t\t\tMD5_Update(&ctx, saved_key[index], len);\n\t\t\tMD5_Update(&ctx, cur_salt->salt, 8);\n\t\t\tMD5_Final(key + 16, &ctx);\n\t\t\t// iv\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, key + 16, 16);\n\t\t\tMD5_Update(&ctx, saved_key[index], len);\n\t\t\tMD5_Update(&ctx, cur_salt->salt, 8);\n\t\t\tMD5_Final(iv, &ctx);\n\n\t\t\tAES_set_decrypt_key(key, 256, &aes_decrypt_key);\n\t\t\tAES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\n\t\t\tc = outbuf[0];\n\t\t\tif (c == 'L' || c == 'K' || c == '5' || c == 'Q') {\n\t\t\t\t// Does it look like a base58 private key (MultiBit, MultiDoge, or oldest-format Android key backup)? (btcrecover)\n\t\t\t\t// check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z]\n\t\t\t\tif (is_base58(outbuf + 1, 15)) {\n\t\t\t\t\t// decrypt second block\n\t\t\t\t\tAES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\t\tif (is_base58(outbuf, 16))\n\t\t\t\t\t\tcracked[index] = 1;\n\t\t\t\t}\n\t\t\t} else if (c == '#') {\n\t\t\t\t// Does it look like a KnC for Android key backup?\n\t\t\t\tif (memcmp((const char*)outbuf, \"# KEEP YOUR PRIV\", 8) == 0) // 8 should be enough\n\t\t\t\t\tcracked[index] = 1;\n\t\t\t} else if (c == '\\x0a') {\n\t\t\t\t// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? (btcrecover)?\n\t\t\t\tif (is_bitcoinj_protobuf_data(outbuf))\n\t\t\t\t\tcracked[index] = 1;\n\t\t\t}\n\n\t\t} else if (cur_salt->type == 2) {\n\t\t\tUTF16 password[PLAINTEXT_LENGTH * 2 + 1];\n\n\t\t\tlen = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1);\n\t\t\tif (len < 0)\n\t\t\t\tlen = strlen16(password);\n\n#ifdef _OPENMP\n\t\t\tint t = omp_get_thread_num();\n\t\t\tif (t >= max_threads) {\n\t\t\t\tfailed = -1;\n\t\t\t\tcontinue;\n\t\t\t}\n#else\n\t\t\tconst int t = 0;\n\n\t\t\tstatic const yescrypt_params_t params = { .N = 16384, .r = 8, .p = 1 };\n\t\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t\t    (const uint8_t *)password, (len + 1) * 2,\n\t\t\t    (const uint8_t *)salt_hardcoded, 8,\n\t\t\t    &params,\n\t\t\t    key, 32)) {\n\t\t\t\tfailed = errno ? 
errno : EINVAL;\n#ifndef _OPENMP\n\t\t\t\tbreak;\n\n\t\t\t}\n\n\t\t\t// 1\n\t\t\tAES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);\n\t\t\tmemcpy(iv, cur_salt->iv, 16);\n\t\t\tAES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\n\t\t\tif (is_bitcoinj_protobuf_data(outbuf))\n\t\t\t\tcracked[index] = 1;\n\t\t\telse {\n\t\t\t\t// 2\n\t\t\t\tAES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);\n\t\t\t\tmemcpy(iv, iv_hardcoded, 16);\n\t\t\t\tAES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\tif (is_bitcoinj_protobuf_data(outbuf))\n\t\t\t\t\tcracked[index] = 1;\n\t\t\t}\n\t\t} else if (cur_salt->type == 3) {\n\t\t\tUTF16 password[PLAINTEXT_LENGTH * 2 + 1];\n\n\t\t\tlen = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1);\n\t\t\tif (len < 0)\n\t\t\t\tlen = strlen16(password);\n\n#ifdef _OPENMP\n\t\t\tint t = omp_get_thread_num();\n\t\t\tif (t >= max_threads) {\n\t\t\t\tfailed = -1;\n\t\t\t\tcontinue;\n\t\t\t}\n#else\n\t\t\tconst int t = 0;\n\n\t\t\tyescrypt_params_t params = { .N = cur_salt->n, .r = cur_salt->r, .p = cur_salt->p };\n\t\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t\t    (const uint8_t *)password, (len + 1) * 2,\n\t\t\t    (const uint8_t *)cur_salt->salt, 8,\n\t\t\t    &params,\n\t\t\t    key, 32)) {\n\t\t\t\tfailed = errno ? errno : EINVAL;\n#ifndef _OPENMP\n\t\t\t\tbreak;\n\n\t\t\t}\n\n\t\t\tmemcpy(iv, cur_salt->block, 16);\n\t\t\tAES_set_decrypt_key(key, 256, &aes_decrypt_key);\n\t\t\tAES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\n\t\t\tif (!memcmp(outbuf, \"\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\", 16))\n\t\t\t\tcracked[index] = 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawSHA1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int index = 0;\n\n#ifdef _OPENMP\n\tint loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;\n\nfor (index = 0; index < loops; ++index)\n\n\t{\n#if SIMD_COEF_32\n\t\tSIMDSHA1body(saved_key[index], crypt_key[index], NULL, SSEi_flags);\n#else\n\t\tSHA_CTX ctx;\n\n\t\tSHA1_Init( &ctx );\n\t\tSHA1_Update( &ctx, (unsigned char*) saved_key[index], strlen( saved_key[index] ) );\n\t\tSHA1_Final( (unsigned char*) crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dpapimk_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char *passwordBuf;\n\t\tint passwordBufSize;\n\t\tunsigned char *sidBuf;\n\t\tint sidBufSize;\n\t\tunsigned char out[MIN_KEYS_PER_CRYPT][KEY_LEN2 + IV_LEN2];\n\t\tunsigned char out2[MIN_KEYS_PER_CRYPT][KEY_LEN2 + IV_LEN2];\n\t\tSHA_CTX ctx;\n\t\tMD4_CTX ctx2;\n\t\tint i;\n\n\t\tint digestlens[MIN_KEYS_PER_CRYPT];\n\n#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\tunion {\n\t\t\tunsigned char *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tint sha256loops = MIN_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA256, loops = MIN_KEYS_PER_CRYPT;\n\n\t\tif (cur_salt->version == 1)\n\t\t\tloops = MIN_KEYS_PER_CRYPT / 
SSE_GROUP_SZ_SHA1;\n\t\telse if (cur_salt->version == 2)\n\t\t\tloops = MIN_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA512;\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tdigestlens[i] = 16;\n\t\t\tpasswordBuf = (unsigned char*)saved_key[index+i];\n\t\t\tpasswordBufSize = strlen16((UTF16*)passwordBuf) * 2;\n\n\t\t\t/* local credentials */\n\t\t\tif (cur_salt->cred_type == 1) {\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, passwordBuf, passwordBufSize);\n\t\t\t\tSHA1_Final(out[i], &ctx);\n\t\t\t\tdigestlens[i] = 20;\n\t\t\t}\n\t\t\t/* domain credentials */\n\t\t\telse if (cur_salt->cred_type == 2 || cur_salt->cred_type == 3) {\n\t\t\t\tMD4_Init(&ctx2);\n\t\t\t\tMD4_Update(&ctx2, passwordBuf, passwordBufSize);\n\t\t\t\tMD4_Final(out[i], &ctx2);\n\t\t\t\tdigestlens[i] = 16;\n\t\t\t}\n\t\t}\n\n\t\t/* 1607+ domain credentials */\n\t\t/* The key derivation algorithm is hardcoded in NtlmShared.dll!MsvpDeriveSecureCredKey */\n\t\tif(cur_salt->cred_type == 3) {\n\t\t\tsidBuf = (unsigned char*)cur_salt->SID;\n\t\t\tsidBufSize = (strlen16(cur_salt->SID) * 2);\n#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = 16;\n\t\t\t\tpin[i] = (unsigned char*)out[i];\n\t\t\t\tx.pout[i] = out2[i];\n\t\t\t}\n\n\t\t\tfor (i = 0; i < sha256loops; i++) {\n\t\t\t\tpbkdf2_sha256_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA256), &lens[i * SSE_GROUP_SZ_SHA256], sidBuf, sidBufSize, 10000, x.pout + (i * SSE_GROUP_SZ_SHA256), 32, 0);\n\t\t\t}\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = 32;\n\t\t\t\tpin[i] = (unsigned char*)out2[i];\n\t\t\t\tx.pout[i] = out[i];\n\t\t\t}\n\n\t\t\tfor (i = 0; i < sha256loops; i++) {\n\t\t\t\tpbkdf2_sha256_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA256), &lens[i * SSE_GROUP_SZ_SHA256], sidBuf, sidBufSize, 1, x.pout + (i * SSE_GROUP_SZ_SHA256), 16, 0);\n\t\t\t}\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpbkdf2_sha256(out[i], 16, sidBuf, sidBufSize, 10000, out2[i], 32, 0);\n\t\t\t\tpbkdf2_sha256(out2[i], 32, sidBuf, sidBufSize, 1, out[i], 16, 0);\n\t\t\t}\n\n\t\t}\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tpasswordBuf = (unsigned char*)cur_salt->SID;\n\t\t\tpasswordBufSize = (strlen16(cur_salt->SID) + 1) * 2;\n\t\t\thmac_sha1(out[i], digestlens[i], passwordBuf, passwordBufSize, out2[i], 20);\n#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)\n\t\t\tlens[i] = 20;\n\t\t\tpin[i] = (unsigned char*)out2[i];\n\t\t\tx.pout[i] = out[i];\n\n\t\t}\n\n#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)\n\t\tif (cur_salt->version == 1)\n\t\t\tfor (i = 0; i < loops; i++)\n\t\t\t\tpbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, x.pout + (i * SSE_GROUP_SZ_SHA1), KEY_LEN1 + IV_LEN1, 0);\n\t\telse if (cur_salt->version == 2)\n\t\t\tfor (i = 0; i < loops; i++)\n\t\t\t\tpbkdf2_sha512_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA512), &lens[i * SSE_GROUP_SZ_SHA512], cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, x.pout + (i * SSE_GROUP_SZ_SHA512), KEY_LEN2 + IV_LEN2, 0);\n#else\n\t\tif (cur_salt->version == 1)\n\t\t\tpbkdf2_sha1(out2[0], 20, cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, out[0], KEY_LEN1 + IV_LEN1, 0);\n\t\telse if (cur_salt->version == 2)\n\t\t\tpbkdf2_sha512(out2[0], 20, cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, out[0], KEY_LEN2 + IV_LEN2, 0);\n\n\t\tif (cur_salt->version == 1) {\n\t\t\t/* 
decrypt will use 32 bytes, we only initialized 20 so far */\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tmemset(out2[i] + 20, 0, 32 - 20);\n\n\t\t\t\tif (decrypt_v1(out[i], out[i] + KEY_LEN1, out2[i], cur_salt->encrypted) == 0)\n\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\telse\n\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t}\n\t\t}\n\t\telse if (cur_salt->version == 2) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tif (decrypt_v2(out[i], out[i] + KEY_LEN2, out2[i], cur_salt->encrypted) == 0)\n\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\telse\n\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/diskcryptor_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char seed[MIN_KEYS_PER_CRYPT][128];\n\t\tint i;\n#ifdef SIMD_COEF_64\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\t// kdf\n#ifdef SIMD_COEF_64\n\t\ti = 0;\n\t\tdo {\n\t\t\tlens[i] = saved_len[index+i];\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = seed[i];\n\t\t\t++i;\n\t\t} while (i < MIN_KEYS_PER_CRYPT && index+i < count);\n\t\tfor (; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = 0;\n\t\t\tpin[i] = pin[0];\n\t\t\tpout[i] = seed[i];\n\t\t}\n\t\tpbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, 1000, pout, 64, 0);\n#else\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tpbkdf2_sha512((unsigned char *)saved_key[index+i], saved_len[index+i], cur_salt->salt, cur_salt->saltlen, 1000, seed[i], 64, 0);\n\t\t}\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tint success = diskcryptor_decrypt_data(seed[i], cur_salt);\n\n\t\t\tif (success) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/electrum_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char iv[16];\n\t\tunsigned char key[32];\n\t\tSHA256_CTX ctx;\n\t\tAES_KEY aes_decrypt_key;\n\t\tint extra;\n\t\tunsigned char static_privkey[MIN_KEYS_PER_CRYPT][64];\n\t\tint i, j;\n\n\t\tif (cur_salt->type == 1 || cur_salt->type == 2 || cur_salt->type == 3) {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tunsigned char outbuf[48] = { 0 };\n\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));\n\t\t\t\tSHA256_Final(key, &ctx);\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, key, 32);\n\t\t\t\tSHA256_Final(key, &ctx);\n\t\t\t\tmemcpy(iv, cur_salt->iv, 16);\n\t\t\t\tAES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);\n\t\t\t\tAES_cbc_encrypt(cur_salt->seed, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\n\t\t\t\tif (cur_salt->type == 1) {\n\t\t\t\t\t// check if 16 bytes of the encrypted seed are all lower-case hex (btcrecover)\n\t\t\t\t\toutbuf[16] = 0;\n\t\t\t\t\tif (hexlenl((const char*)outbuf, &extra) != 8 * 2 || 
extra)\n\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\telse\n\t\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\t} else if (cur_salt->type == 2) {\n\t\t\t\t\t// check if starting 4 bytes are \"xprv\" or \"zprv\"\n\t\t\t\t\tif (memcmp(outbuf, \"xprv\", 4) && memcmp(outbuf, \"zprv\", 4)) {\n\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// check if remaining 12 bytes are in base58 set [1-9A-HJ-NP-Za-km-z]\n\t\t\t\t\t\tfor (j = 0; j < 12; j++) {\n\t\t\t\t\t\t\tunsigned char c = outbuf[4 + j];\n\t\t\t\t\t\t\tif ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a'))) {\n\t\t\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (j == 12)\n\t\t\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\t\t}\n\t\t\t\t} else if (cur_salt->type == 3) {\n\t\t\t\t\tunsigned char padbyte = outbuf[15];\n\t\t\t\t\t// check for valid PKCS7 padding for a 52 or 51 byte \"WIF\" private key, 64 is the original data size\n\t\t\t\t\tif (padbyte == 12 || padbyte == 13) {\n\t\t\t\t\t\tif (check_pkcs_pad(outbuf, 16, 16) < 0)\n\t\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (cur_salt->type == 4 || cur_salt->type == 5) {\n\t\t\tBIGNUM *p, *q, *r;\n\t\t\tBN_CTX *ctx;\n\t\t\tunsigned char shared_pubkey[33];\n\t\t\tunsigned char keys[128];\n\t\t\tunsigned char cmac[32];\n\t\t\tsecp256k1_context *sctx;\n\t\t\tSHA512_CTX md_ctx;\n\t\t\tint shared_pubkeylen= 33;\n#ifdef SIMD_COEF_64\n\t\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tpout[i] = static_privkey[i];\n\t\t\t}\n\t\t\tpbkdf2_sha512_sse((const unsigned char **)pin, len, (unsigned char*)\"\", 0, 1024, pout, 64, 0);\n#else\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\tpbkdf2_sha512((unsigned char *)saved_key[index+i],\n\t\t\t\t\t\tstrlen(saved_key[index+i]),\n\t\t\t\t\t\t(unsigned char*)\"\", 0, 1024,\n\t\t\t\t\t\tstatic_privkey[i], 64, 0);\n\t\t\t}\n\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\t\t// do static_privkey % GROUP_ORDER\n\t\t\t\tp = BN_bin2bn(static_privkey[i], 64, NULL);\n\t\t\t\tq = BN_new();\n\t\t\t\tr = BN_new();\n\t\t\t\tBN_hex2bn(&q, group_order);\n\t\t\t\tctx = BN_CTX_new();\n\t\t\t\tBN_mod(r, p, q, ctx);\n\t\t\t\tBN_CTX_free(ctx);\n\t\t\t\tBN_free(p);\n\t\t\t\tBN_free(q);\n\t\t\t\tBN_bn2binpad32(r, static_privkey[i]);\n\t\t\t\tBN_free(r);\n\t\t\t\tsctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);\n\t\t\t\t// multiply point with a scaler, shared_pubkey is compressed representation\n\t\t\t\tsecp256k1_mul(sctx, shared_pubkey, &cur_salt->pubkey, static_privkey[i]);\n\t\t\t\tsecp256k1_context_destroy(sctx);\n\t\t\t\tSHA512_Init(&md_ctx);\n\t\t\t\tSHA512_Update(&md_ctx, shared_pubkey, shared_pubkeylen);\n\t\t\t\tSHA512_Final(keys, &md_ctx);\n\t\t\t\tif (cur_salt->type == 4) {\n\t\t\t\t\t// calculate mac of data\n\t\t\t\t\thmac_sha256(keys + 32, 32, cur_salt->data, cur_salt->datalen, cmac, 32);\n\t\t\t\t\tif (memcmp(&cur_salt->mac, cmac, 16) == 0)\n\t\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\t\telse\n\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t} else if (cur_salt->type == 5) {\n\t\t\t\t\t z_stream z;\n\t\t\t\t\t\tunsigned char iv[16];\n\t\t\t\t\t\tunsigned char out[512] = { 0 
};\n\t\t\t\t\t\tunsigned char fout[512] = { 0 };\n\t\t\t\t\t\tAES_KEY aes_decrypt_key;\n\n\t\t\t\t\t\t// common zlib settings\n\t\t\t\t\t\tz.zalloc = Z_NULL;\n\t\t\t\t\t\tz.zfree = Z_NULL;\n\t\t\t\t\t\tz.opaque = Z_NULL;\n\t\t\t\t\t\tz.avail_in = 512;\n\t\t\t\t\t\tz.avail_out = 512;\n\t\t\t\t\t\tz.next_out = fout;\n\n\t\t\t\t\t\tmemcpy(iv, keys, 16);\n\t\t\t\t\t\t// fast zlib based rejection test, is this totally safe?\n\t\t\t\t\t\tAES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);\n\t\t\t\t\t\tAES_cbc_encrypt(cur_salt->data, out, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\t\t\tif ((memcmp(out, \"\\x78\\x9c\", 2) != 0) || (((out[2] & 0x7) != 0x4) && ((out[2] & 0x7) != 0x5))) {\n\t\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tAES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);\n\t\t\t\t\t\t\tAES_cbc_encrypt(cur_salt->data + 16, out + 16, 512 - 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\t\t\t\tz.next_in = out;\n\t\t\t\t\t\t\tinflateInit2(&z, 15);\n\t\t\t\t\t\t\tinflate(&z, Z_NO_FLUSH);\n\t\t\t\t\t\t\tinflateEnd(&z);\n\t\t\t\t\t\t\tif ((memcmp(fout, EXPECTED_BYTES_1, 7) == 0) || (memcmp(fout, EXPECTED_BYTES_2, 8) == 0))\n\t\t\t\t\t\t\t\tcracked[index+i] = 1;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\tcracked[index+i] = 0;\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/IPB2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "truct db_salt *salt)\n{\n\tconst int count = *pcount;\n#ifdef SIMD_COEF_32\n#if defined(_OPENMP)\n\tint t;\nfor (t = 0; t < threads; t++)\n#define ti (t*NBKEYS+index)\n#else\n#define t 0\n#define ti index\n\n\t{\n\t\tunsigned int index, i;\n\n\t\tif (new_salt)\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\tconst uint32_t *sp = cur_salt;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tuint32_t *kb = (uint32_t*)&saved_key[GETPOS(0, ti)];\n\t\t\tfor (i = 0; i < MD5_HEX_SIZE / 4; i++, kb += SIMD_COEF_32)\n\t\t\t\t*kb = *sp++;\n#else\n\t\t\tuint32_t *kb = (uint32_t*)&saved_key[GETPOS(3, ti)];\n\t\t\tfor (i = 0; i < MD5_HEX_SIZE / 4; i++, kb += SIMD_COEF_32)\n\t\t\t\t*kb = JOHNSWAP(*sp++);\n\n\t\t}\n\n\t\tif (new_key)\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\tconst uint32_t *key = (uint32_t*)saved_plain[ti];\n\t\t\tint len = 0, temp;\n#if ARCH_LITTLE_ENDIAN\n\t\t\tuint32_t *kb = (uint32_t*)&key_buf[GETPOS(0, ti)];\n\t\t\tuint32_t *keybuffer = kb;\n\n\t\t\twhile((unsigned char)(temp = *key++)) {\n\t\t\t\tif (!(temp & 0xff00)) {\n\t\t\t\t\t*kb = (unsigned char)temp | (0x80 << 8);\n\t\t\t\t\tlen++;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\tif (!(temp & 0xff0000)) {\n\t\t\t\t\t*kb = (unsigned short)temp | (0x80 << 16);\n\t\t\t\t\tlen+=2;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\tif (!(temp & 0xff000000)) {\n\t\t\t\t\t*kb = temp | (0x80U << 24);\n\t\t\t\t\tlen+=3;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\t*kb = temp;\n#else\n\t\t\tuint32_t *kb = (uint32_t*)&key_buf[GETPOS(3, ti)];\n\t\t\tuint32_t *keybuffer = kb;\n\n\t\t\twhile((temp = *key++) & 0xff000000) {\n\t\t\t\tif (!(temp & 0xff0000))\n\t\t\t\t{\n\t\t\t\t\t*kb = JOHNSWAP((temp & 0xff000000) | (0x80 << 16));\n\t\t\t\t\tlen++;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\tif (!(temp & 0xff00))\n\t\t\t\t{\n\t\t\t\t\t*kb = JOHNSWAP((temp & 0xffff0000) | (0x80 << 8));\n\t\t\t\t\tlen+=2;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\tif (!(temp & 0xff))\n\t\t\t\t{\n\t\t\t\t\t*kb = JOHNSWAP(temp | 
0x80U);\n\t\t\t\t\tlen+=3;\n\t\t\t\t\tgoto key_cleaning;\n\t\t\t\t}\n\t\t\t\t*kb = JOHNSWAP(temp);\n\n\n\t\t\t\tlen += 4;\n\t\t\t\tkb += SIMD_COEF_32;\n\t\t\t}\n\t\t\t*kb = 0x00000080;\n\nkey_cleaning:\n\t\t\tkb += SIMD_COEF_32;\n\t\t\twhile(*kb) {\n\t\t\t\t*kb = 0;\n\t\t\t\tkb += SIMD_COEF_32;\n\t\t\t}\n\t\t\tkeybuffer[14*SIMD_COEF_32] = len << 3;\n\t\t}\n\n\t\tSIMDmd5body(&key_buf[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\t// Somehow when I optimised this it got faster in Valgrind but slower IRL\n\t\t\tfor (i = 0; i < BINARY_SIZE; i++) {\n\t\t\t\tunsigned char v = crypt_key[GETOUTPOS(i, ti)];\n\t\t\t\tsaved_key[GETPOS(MD5_HEX_SIZE + 2 * i, ti)] = itoa16_shr_04[ARCH_INDEX(v)];\n\t\t\t\tsaved_key[GETPOS(MD5_HEX_SIZE + 2 * i + 1, ti)] = itoa16_and_0f[ARCH_INDEX(v)];\n\t\t\t}\n\t\t}\n\n\t\tSIMDmd5body(&saved_key[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);\n\t\tSIMDmd5body(empty_key, (unsigned int*)&crypt_key[t*NBKEYS*16], (unsigned int*)&crypt_key[t*NBKEYS*16], SSEi_RELOAD|SSEi_MIXED_IN);\n\t}\n\t//dump_stuff_mmx_msg(\"\\nfinal \", saved_key, 64, count-1);\n\t//dump_out_mmx_msg(\"result\", crypt_key, 16, count-1);\n\tnew_salt = new_key = 0;\n\n#else\n\n#ifdef _OPENMP\n\tint index;\n#pragma omp parallel for\n\tfor (index = 0; index < count; index++)\n#else\n#define index\t0\n\n\t{\n\t\tMD5_CTX ctx;\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], MD5_HEX_SIZE * 2);\n\t\tMD5_Final((unsigned char*)crypt_key[index], &ctx);\n\t}\n#undef index\n\n\treturn count;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/IPB2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_msg(\"result\", crypt_key, 16, count-1);\n\tnew_salt = new_key = 0;\n\n#else\n\n#ifdef _OPENMP\n\tint index;\nfor (index = 0; index < count; index++)\n#else\n#define index\t0\n\n\t{\n\t\tMD5_CTX ctx;\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], MD5_HEX_SIZE * 2);\n\t\tMD5_Final((unsigned char*)crypt_key[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/cq_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t\tcrypt_key[index] = AdProcessPassword(saved_key[index]);\n\n\treturn count;\n}\n\nstatic int cmp_all(void *binary, int count)\n{\n\tint i;\n\n\tfor (i = 0; i < count; ++i)\n\t\tif (*(uint32_t *)binary == crypt_key[i])\n\t\t\treturn 1;\n\n\treturn 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/hmacSHA1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#if _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tif (new_keys) {\n\t\t\tSIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],\n\t\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t\tSIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],\n\t\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t}\n\t\tSIMDSHA1body(cur_salt,\n\t\t (unsigned 
int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n\t\tSIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n#else\n\t\tSHA_CTX ctx;\n\n\t\tif (new_keys) {\n\t\t\tSHA1_Init(&ipad_ctx[index]);\n\t\t\tSHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\tSHA1_Init(&opad_ctx[index]);\n\t\t\tSHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t}\n\n\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, cur_salt, strlen((char*)cur_salt));\n\t\tSHA1_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);\n\t\tSHA1_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/cryptosafe_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") {\n\t\tmemset(cracked, 0, sizeof(cracked[0]) * cracked_count);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tAES_KEY aes_decrypt_key;\n\t\tunsigned char plain[16], iv[16] = { 0 };\n\n\t\tAES_set_decrypt_key((unsigned char*)saved_key[index], 256, &aes_decrypt_key);\n\t\tAES_cbc_encrypt(cur_salt->ciphertext, plain, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\tif (!memcmp(plain, \"[{\\\"coinName\\\":\\\"\", 14)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t}\n\treturn count;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rakp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#if _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tif (new_keys) {\n\t\t\tSIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],\n\t\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t\tSIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],\n\t\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN);\n\t\t}\n\t\tSIMDSHA1body(cur_salt[0],\n\t\t (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n\t\tSIMDSHA1body(cur_salt[1],\n\t\t (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT);\n\t\tSIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],\n\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);\n#else\n\t\tSHA_CTX ctx;\n\n\t\tif (new_keys) {\n\t\t\tSHA1_Init(&ipad_ctx[index]);\n\t\t\tSHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\tSHA1_Init(&opad_ctx[index]);\n\t\t\tSHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t}\n\n\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, cur_salt.salt, cur_salt.length);\n\t\tSHA1_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t\tmemcpy(&ctx, 
&opad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);\n\t\tSHA1_Final((unsigned char*) crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/tezos_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "endif\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunion {\n\t\t\tunsigned char seed[64];\n\t\t\ted25519_secret_key sk;\n\t\t} seed[MIN_KEYS_PER_CRYPT];\n\t\tchar salt[MIN_KEYS_PER_CRYPT][16 + 256 + PLAINTEXT_LENGTH];\n\t\tint i;\n#ifdef SIMD_COEF_64\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tint slens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *sin[MIN_KEYS_PER_CRYPT];\n\n\t\t// create varying salt(s)\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tmemcpy(salt[i], \"mnemonic\", 8);\n\t\t\tmemcpy(salt[i] + 8, cur_salt->email, cur_salt->email_length + 1);\n\t\t\tstrcat(salt[i], saved_key[index+i]);\n\t\t}\n\n\t\t// kdf\n#ifdef SIMD_COEF_64\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = cur_salt->mnemonic_length;\n\t\t\tpin[i] = (unsigned char*)cur_salt->mnemonic;\n\t\t\tsin[i] = (unsigned char*)salt[i];\n\t\t\tpout[i] = seed[i].seed;\n\t\t\tslens[i] = strlen(salt[i]);\n\t\t\tif (!warned && !self_test_running && slens[i] > PBKDF2_64_MAX_SALT_SIZE) {\n#ifdef _OPENMP\n#pragma omp critical\n\n\t\t\t\t{\n\t\t\t\t\twarned = 1;\n\t\t\t\t\tfprintf(stderr,\n\t\t\t\t\t\t\"Warning: over-long combination(s) of e-mail address and candidate password\\n\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpbkdf2_sha512_sse_varying_salt((const unsigned char**)pin, lens, sin, slens, 2048, pout, 64, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha512((unsigned char*)cur_salt->mnemonic,\n\t\t\t\t\tcur_salt->mnemonic_length, (unsigned char*)salt[i], strlen(salt[i]), 2048,\n\t\t\t\t\tseed[i].seed, 64, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tunsigned char buffer[20];\n\t\t\ted25519_public_key pk;\n\n\t\t\t// asymmetric stuff\n\t\t\ted25519_publickey(seed[i].sk, pk);\n\n\t\t\tblake2b((uint8_t *)buffer, (unsigned char*)pk, NULL, 20, 32, 0); // pk is pkh (pubkey hash)\n\n\t\t\tif (!memcmp(cur_salt->raw_address + 2, buffer, 20)) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pgpdisk_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char key[40]; // multiple of 20 needed for pgpdisk_kdf()\n\n\t\tif (cur_salt->algorithm == 5 || cur_salt->algorithm == 6 || cur_salt->algorithm == 7) {\n\t\t\tAES_KEY aes_key;\n\n\t\t\tpgpdisk_kdf(saved_key[index], cur_salt->salt, key, 32);\n\t\t\t// DecryptPassphraseKey in original source code, compute CheckBytes\n\t\t\tAES_set_encrypt_key(key, 256, &aes_key);\n\t\t\tAES_ecb_encrypt(key, (unsigned char*)crypt_out[index], &aes_key, AES_ENCRYPT);\n\t\t} else if (cur_salt->algorithm == 4) {\n\t\t\tTwofish_key tkey;\n\n\t\t\tpgpdisk_kdf(saved_key[index], cur_salt->salt, key, 
32);\n\t\t\tTwofish_prepare_key(key, 32, &tkey);\n\t\t\tTwofish_encrypt(&tkey, key, (unsigned char*)crypt_out[index]);\n\t\t} else if (cur_salt->algorithm == 3) {\n\t\t\tCAST_KEY ck;\n\n\t\t\tpgpdisk_kdf(saved_key[index], cur_salt->salt, key, 16);\n\t\t\tCAST_set_key(&ck, 16, key);\n\t\t\tmemset((unsigned char*)crypt_out[index], 0, BINARY_SIZE);\n\t\t\tCAST_ecb_encrypt(key, (unsigned char*)crypt_out[index], &ck, CAST_ENCRYPT);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/snmp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ount = *pcount;\n\tint index;\n\n\tmemset(cracked, 0, sizeof(cracked[0])*cracked_count);\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tHMACMD5Context ctx;\n\t\tunsigned char authKey[20];\n\t\tunsigned char out[20];\n\n/*\n * Missed optimization potential:\n * This should be re-worked to cache authKey (in global malloc'ed arrays) for\n * the MD5 and SHA-1 variations of the algorithm if/as they're first computed\n * and then reuse them for further salts.\n */\n\t\tif (cur_salt->authProtocol == 1) {\n\t\t\tsnmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],\n\t\t\t\t\tstrlen(saved_key[index]),\n\t\t\t\t\tcur_salt->engineID,\n\t\t\t\t\tcur_salt->engineLength, authKey);\n\t\t\thmac_md5_init_rfc2104(authKey, 16, &ctx);\n\t\t\thmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);\n\t\t\thmac_md5_final(out, &ctx);\n\t\t\tif (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)\n\t\t\t\tcracked[index] = 1;\n\t\t\telse\n\t\t\t\tcracked[index] = 0;\n\t\t} else if (cur_salt->authProtocol == 2) {\n\t\t\tsnmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],\n\t\t\t\t\tstrlen(saved_key[index]),\n\t\t\t\t\tcur_salt->engineID,\n\t\t\t\t\tcur_salt->engineLength, authKey);\n\t\t\thmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);\n\t\t\tif (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)\n\t\t\t\tcracked[index] = 1;\n\t\t\telse\n\t\t\t\tcracked[index] = 0;\n\t\t} else if (cur_salt->authProtocol == 0) {\n\t\t\tcracked[index] = 0;\n\t\t\tsnmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],\n\t\t\t\t\tstrlen(saved_key[index]),\n\t\t\t\t\tcur_salt->engineID,\n\t\t\t\t\tcur_salt->engineLength, authKey);\n\t\t\thmac_md5_init_rfc2104(authKey, 16, &ctx);\n\t\t\thmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);\n\t\t\thmac_md5_final(out, &ctx);\n\t\t\tif (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0) {\n\t\t\t\tcracked[index] = 1;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tsnmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],\n\t\t\t\t\tstrlen(saved_key[index]),\n\t\t\t\t\tcur_salt->engineID,\n\t\t\t\t\tcur_salt->engineLength, authKey);\n\t\t\thmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);\n\t\t\tif (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)\n\t\t\t\tcracked[index] = 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bitshares_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA512_CTX ctx;\n\t\tunsigned char km[64];\n\t\tAES_KEY aes_decrypt_key;\n\t\tunsigned char out[MAX_CIPHERTEXT_LENGTH];\n\t\tunsigned char iv[16] = { 
0 }; // does not matter\n\n\t\tif (cur_salt->type == 0) {\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\t\t\tSHA512_Final(km, &ctx);\n\n\t\t\tAES_set_decrypt_key(km, 256, &aes_decrypt_key);\n\t\t\tAES_cbc_encrypt(cur_salt->ct + cur_salt->ctlen - 32, out, 32, &aes_decrypt_key, iv, AES_DECRYPT);\n\n\t\t\tif (memcmp(out + 16, \"\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\\x10\", 16) == 0) {\n\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t} else {\n\t\t\tsecp256k1_context *ctxs;\n\t\t\tsecp256k1_pubkey pubkey;\n\t\t\tSHA256_CTX sctx;\n\t\t\tunsigned char output[128];\n\t\t\tsize_t outlen = 33;\n\t\t\tint padbyte;\n\t\t\tint dlen = cur_salt->ctlen - outlen;\n\n\t\t\tSHA256_Init(&sctx);\n\t\t\tSHA256_Update(&sctx, saved_key[index], saved_len[index]);\n\t\t\tSHA256_Final(km, &sctx);\n\n\t\t\tctxs = secp256k1_context_create(SECP256K1_CONTEXT_NONE);\n\t\t\tsecp256k1_ec_pubkey_parse(ctxs, &pubkey, cur_salt->ct, 33);\n\t\t\tsecp256k1_ec_pubkey_tweak_mul(ctxs, &pubkey, km);\n\t\t\tsecp256k1_ec_pubkey_serialize(ctxs, output, &outlen, &pubkey, SECP256K1_EC_UNCOMPRESSED);\n\t\t\tsecp256k1_context_destroy(ctxs);\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, output + 1, 32);\n\t\t\tSHA512_Final(km, &ctx);\n\t\t\thex_encode(km, 64, output);\n\n\t\t\tSHA512_Init(&ctx);\n\t\t\tSHA512_Update(&ctx, output, 128);\n\t\t\tSHA512_Final(km, &ctx);\n\t\t\tAES_set_decrypt_key(km, 256, &aes_decrypt_key);\n\t\t\tAES_cbc_encrypt(cur_salt->ct + 33, out, dlen, &aes_decrypt_key, km + 32, AES_DECRYPT);\n\n\t\t\tpadbyte = out[dlen - 1];\n\t\t\tif (padbyte <= 16) {\n\t\t\t\t// check padding!\n\t\t\t\tif (check_pkcs_pad(out, dlen, 16) >= 0) {\n\t\t\t\t\t// check checksum\n\t\t\t\t\tSHA256_Init(&sctx);\n\t\t\t\t\tSHA256_Update(&sctx, out + 4, dlen - 4 - padbyte);\n\t\t\t\t\tSHA256_Final(km, &sctx);\n\t\t\t\t\tif (memcmp(km, out, 4) == 0) {\n\t\t\t\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\t\t\tany_cracked |= 1;\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapG_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nst int count = *pcount;\n#if SIMD_COEF_32\n#define ti (t*NBKEYS+index)\n\n\tint t;\n#if defined(_OPENMP)\nfor (t = 0; t < (count-1)/(NBKEYS)+1; t++) {\n\t\tunsigned int index, i, longest;\n\t\tint len;\n\t\tunsigned int crypt_len[NBKEYS];\n\n\t\tlongest = 0;\n\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\n\t\t\t// Store key into vector key buffer\n\t\t\tif ((len = keyLen[ti]) < 0) {\n\t\t\t\tuint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETSTARTPOS(ti)];\n#if ARCH_ALLOWS_UNALIGNED\n\t\t\t\tconst uint32_t *wkey = (uint32_t*)saved_plain[ti];\n#else\n\t\t\t\tchar buf_aligned[UTF8_PLAINTEXT_LENGTH + 1] JTR_ALIGN(4);\n\t\t\t\tchar *key = (char*)saved_plain[ti];\n\t\t\t\tconst uint32_t *wkey = is_aligned(key, 4) ?\n\t\t\t\t\t\t(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);\n\n\t\t\t\tuint32_t temp;\n\n\t\t\t\tlen = 0;\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\twhile(((unsigned char)(temp = *wkey++))) {\n\t\t\t\t\tif (!(temp & 0xff00))\n\t\t\t\t\t{\n\t\t\t\t\t\t*keybuf_word = JOHNSWAP(temp & 0xff);\n\t\t\t\t\t\tlen++;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tif (!(temp & 0xff0000))\n\t\t\t\t\t{\n\t\t\t\t\t\t*keybuf_word = JOHNSWAP(temp & 
0xffff);\n\t\t\t\t\t\tlen+=2;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t*keybuf_word = JOHNSWAP(temp);\n\t\t\t\t\tif (!(temp & 0xff000000))\n\t\t\t\t\t{\n\t\t\t\t\t\tlen+=3;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n#else\n\t\t\t\twhile((temp = *wkey++) & 0xff000000) {\n\t\t\t\t\tif (!(temp & 0xff0000))\n\t\t\t\t\t{\n\t\t\t\t\t\t*keybuf_word = (temp & 0xff000000) | (0x80 << 16);\n\t\t\t\t\t\tlen++;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tif (!(temp & 0xff00))\n\t\t\t\t\t{\n\t\t\t\t\t\t*keybuf_word = (temp & 0xffff0000) | (0x80 << 8);\n\t\t\t\t\t\tlen+=2;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t*keybuf_word = temp;\n\t\t\t\t\tif (!(temp & 0xff))\n\t\t\t\t\t{\n\t\t\t\t\t\t*keybuf_word = temp | 0x80U;\n\t\t\t\t\t\tlen+=3;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tlen += 4;\n\t\t\t\t\tif (len & 63)\n\t\t\t\t\t\tkeybuf_word += SIMD_COEF_32;\n\t\t\t\t\telse\n\t\t\t\t\t\tkeybuf_word = (uint32_t*)&saved_key[len>>6][GETSTARTPOS(ti)];\n\t\t\t\t}\n\n\t\t\t\t// Back-out of trailing spaces\n\t\t\t\twhile(len && saved_plain[ti][len - 1] == ' ')\n\t\t\t\t\tsaved_plain[ti][--len] = 0;\n\t\t\t\tkeyLen[ti] = len;\n\t\t\t}\n\n\t\t\t// 1.\twe need to SHA1 the password and username\n\t\t\tfor (i = 0; i < cur_salt->l; i++)\n\t\t\t\tsaved_key[(len+i)>>6][GETPOS((len + i), ti)] = cur_salt->s[i];\n\t\t\tlen += i;\n\n\t\t\tsaved_key[len>>6][GETPOS(len, ti)] = 0x80;\n\n\t\t\t// Clean rest of this buffer\n\t\t\ti = len;\n\t\t\twhile (++i & 3)\n\t\t\t\tsaved_key[i>>6][GETPOS(i, ti)] = 0;\n\t\t\tfor (; i < (((len+8)>>6)+1)*64; i += 4)\n\t\t\t\t*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;\n\n\t\t\t// This should do good but Valgrind insists it's a waste\n\t\t\t//if (clean_pos[ti] < i)\n\t\t\t//\tclean_pos[ti] = len + 1;\n\n\t\t\tif (len > longest)\n\t\t\t\tlongest = len;\n\t\t\t((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;\n\t\t\tcrypt_len[index] = len;\n\t\t}\n\n\t\tSIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&crypt_key[t*20*NBKEYS], NULL, SSEi_MIXED_IN);\n\n\t\t// Do another and possibly a third limb\n\t\tmemcpy(&interm_crypt[t*20*NBKEYS], &crypt_key[t*20*NBKEYS], 20*NBKEYS);\n\t\tfor (i = 1; i < (((longest + 8) >> 6) + 1); i++) {\n\t\t\tSIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);\n\t\t\t// Copy any output that is done now\n\t\t\tfor (index = 0; index < NBKEYS; index++)\n\t\t\t\tif (((crypt_len[index] + 8) >> 6) == i)\n\t\t\t\t\tcrypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);\n\t\t}\n\n\t\tlongest = 0;\n\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\tunsigned int offsetMagicArray;\n\t\t\tunsigned int lengthIntoMagicArray;\n\t\t\tconst unsigned char *p;\n\t\t\tint i;\n\n\t\t\t// If final crypt ends up to be 56-61 bytes (or so), this must be clean\n\t\t\tfor (i = 0; i < LIMB; i++)\n\t\t\t\tif (keyLen[ti] < i * 64 + 55)\n\t\t\t\t\t((unsigned int*)saved_key[i])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = 0;\n\n\t\t\tlen = keyLen[ti];\n\t\t\tlengthIntoMagicArray = extractLengthOfMagicArray(crypt_key, ti);\n\t\t\toffsetMagicArray = extractOffsetToMagicArray(crypt_key, ti);\n\n\t\t\t// 2.\tnow, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...\n\t\t\ti = len - 1;\n\t\t\tp = &theMagicArray[offsetMagicArray];\n\t\t\t// Copy a char at a time until aligned (at 
destination)...\n\t\t\twhile (++i & 3)\n\t\t\t\tsaved_key[i>>6][GETPOS(i, ti)] = *p++;\n\t\t\t// ...then a word at a time. This is a good boost, we are copying between 32 and 82 bytes here.\n#if ARCH_ALLOWS_UNALIGNED\n\t\t\tfor (;i < lengthIntoMagicArray + len; i += 4, p += 4)\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\t*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = JOHNSWAP(*(uint32_t*)p);\n#else\n\t\t\t\t*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = *(uint32_t*)p;\n\n#else\n\t\t\tfor (;i < lengthIntoMagicArray + len; ++i, ++p) {\n\t\t\t\tsaved_key[i>>6][GETPOS(i, ti)] = *p;\n\t\t\t}\n\n\n\t\t\t// Now, the salt. This is typically too short for the stunt above.\n\t\t\tfor (i = 0; i < cur_salt->l; i++)\n\t\t\t\tsaved_key[(len+lengthIntoMagicArray+i)>>6][GETPOS((len + lengthIntoMagicArray + i), ti)] = cur_salt->s[i];\n\t\t\tlen += lengthIntoMagicArray + cur_salt->l;\n\t\t\tsaved_key[len>>6][GETPOS(len, ti)] = 0x80;\n\t\t\tcrypt_len[index] = len;\n\n\t\t\t// Clean the rest of this buffer as needed\n\t\t\ti = len;\n\t\t\twhile (++i & 3)\n\t\t\t\tsaved_key[i>>6][GETPOS(i, ti)] = 0;\n\t\t\tfor (; i < clean_pos[ti]; i += 4)\n\t\t\t\t*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;\n\n\t\t\tclean_pos[ti] = len + 1;\n\t\t\tif (len > longest)\n\t\t\t\tlongest = len;\n\n\t\t\t((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;\n\t\t}\n\n\t\tSIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], NULL, SSEi_MIXED_IN);\n\n\t\t// Typically, no or very few crypts are done at this point so this is faster than to memcpy the lot\n\t\tfor (index = 0; index < NBKEYS; index++)\n\t\t\tif (crypt_len[index] < 56)\n\t\t\t\tcrypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);\n\n\t\t// Do another and possibly a third, fourth and fifth limb\n\t\tfor (i = 1; i < (((longest + 8) >> 6) + 1); i++) {\n\t\t\tSIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);\n\t\t\t// Copy any output that is done now\n\t\t\tfor (index = 0; index < NBKEYS; index++)\n\t\t\t\tif (((crypt_len[index] + 8) >> 6) == i)\n\t\t\t\t\tcrypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);\n\t\t}\n\t}\n#undef t\n#undef ti\n\n#else\n\n\tint index;\n#ifdef _OPENMP\n#pragma omp parallel for\n\n\tfor (index = 0; index < count; index++) {\n\t\tunsigned int offsetMagicArray, lengthIntoMagicArray;\n\t\tunsigned char temp_key[BINARY_SIZE];\n\t\tunsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size...\n\t\tSHA_CTX ctx;\n\n\t\tif (keyLen[index] < 0) {\n\t\t\tkeyLen[index] = strlen((char*)saved_key[index]);\n\n\t\t\t// Back-out of trailing spaces\n\t\t\twhile (keyLen[index] && saved_key[index][keyLen[index] - 1] == ' ') {\n\t\t\t\tsaved_key[index][--keyLen[index]] = 0;\n\t\t\t\tif (keyLen[index] == 0) break;\n\t\t\t}\n\t\t}\n\n\t\t//1.\twe need to SHA1 the password and username\n\t\tmemcpy(tempVar, saved_key[index], keyLen[index]); //first: the password\n\t\tmemcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username)\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l);\n\t\tSHA1_Final((unsigned char*)temp_key, &ctx);\n\n\t\tlengthIntoMagicArray = extractLengthOfMagicArray(temp_key);\n\t\toffsetMagicArray = extractOffsetToMagicArray(temp_key);\n\n\t\t//2. 
now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...\n\t\tmemcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray);\n\t\tmemcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l);\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l);\n\t\tSHA1_Final((unsigned char*)crypt_key[index], &ctx);\n\t}\n#undef index\n\n\n\treturn count;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapG_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_crypt, (unsigned int*)crypt_key, ti);\n\t\t}\n\t}\n#undef t\n#undef ti\n\n#else\n\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned int offsetMagicArray, lengthIntoMagicArray;\n\t\tunsigned char temp_key[BINARY_SIZE];\n\t\tunsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size...\n\t\tSHA_CTX ctx;\n\n\t\tif (keyLen[index] < 0) {\n\t\t\tkeyLen[index] = strlen((char*)saved_key[index]);\n\n\t\t\t// Back-out of trailing spaces\n\t\t\twhile (keyLen[index] && saved_key[index][keyLen[index] - 1] == ' ') {\n\t\t\t\tsaved_key[index][--keyLen[index]] = 0;\n\t\t\t\tif (keyLen[index] == 0) break;\n\t\t\t}\n\t\t}\n\n\t\t//1.\twe need to SHA1 the password and username\n\t\tmemcpy(tempVar, saved_key[index], keyLen[index]); //first: the password\n\t\tmemcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username)\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l);\n\t\tSHA1_Final((unsigned char*)temp_key, &ctx);\n\n\t\tlengthIntoMagicArray = extractLengthOfMagicArray(temp_key);\n\t\toffsetMagicArray = extractOffsetToMagicArray(temp_key);\n\n\t\t//2. 
now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...\n\t\tmemcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray);\n\t\tmemcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l);\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l);\n\t\tSHA1_Final((unsigned char*)crypt_key[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/blockchain_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint lens[MIN_KEYS_PER_CRYPT], i;\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens,\n\t\t\tcur_salt->data, 16, cur_salt->iter, pout, 32, 0);\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (blockchain_decrypt(master[i], cur_salt->data) == 0)\n\t\t\t\tcracked[i+index] = 1;\n\t\t\telse\n\t\t\t\tcracked[i+index] = 0;\n\t\t}\n#else\n\t\tunsigned char master[32];\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index],\n\t\t\tstrlen(saved_key[index]),\n\t\t\tcur_salt->data, 16,\n\t\t\tcur_salt->iter, master, 32, 0);\n\t\tif (blockchain_decrypt(master, cur_salt->data) == 0)\n\t\t\tcracked[index] = 1;\n\t\telse\n\t\t\tcracked[index] = 0;\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapB_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", struct db_salt *salt)\n{\n\tconst int count = *pcount;\n#if SIMD_COEF_32\n#if defined(_OPENMP)\n\tint t;\nfor (t = 0; t < threads; t++)\n#define ti (t*NBKEYS+index)\n#else\n#define t 0\n#define ti index\n\n\t{\n\t\tunsigned int index, i;\n\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\tint len;\n\n\t\t\tif ((len = keyLen[ti]) < 0) {\n\t\t\t\tunsigned char *key;\n\n\t\t\t\t// Load key into vector buffer\n\t\t\t\tlen = 0;\n\t\t\t\tkey = (unsigned char*)saved_plain[ti];\n\t\t\t\twhile (*key)\n\t\t\t\t{\n\t\t\t\t\tsaved_key[GETPOS(len, ti)] =\n\t\t\t\t\t\ttranstable[*key++];\n\t\t\t\t\tlen++;\n\t\t\t\t}\n\n\t\t\t\t// Back-out of trailing spaces\n\t\t\t\twhile(len && *--key == ' ')\n\t\t\t\t{\n\t\t\t\t\tlen--;\n\t\t\t\t\tsaved_key[GETPOS(len, ti)] = 0;\n\t\t\t\t}\n\n\t\t\t\tkeyLen[ti] = len;\n\t\t\t}\n\n\t\t\t// Prepend the salt\n\t\t\tfor (i = 0; i < cur_salt->l; i++)\n\t\t\t\tsaved_key[GETPOS((len + i), ti)] =\n\t\t\t\t\tcur_salt->s[i];\n\n\t\t\tsaved_key[GETPOS((len + i), ti)] = 0x80;\n\t\t\t((unsigned int *)saved_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = (len + i) << 3;\n\n\t\t\t// Clean rest of buffer\n\t\t\tfor (i = i + len + 1; i <= clean_pos[ti]; i++)\n\t\t\t\tsaved_key[GETPOS(i, ti)] = 0;\n\t\t\tclean_pos[ti] = len + cur_salt->l;\n\t\t}\n\n\t\tSIMDmd5body(&saved_key[t*NBKEYS*64],\n\t\t (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);\n\n\t\tfor (i = 0; i < SIMD_PARA_MD5; 
i++)\n\t\t\tmemset(&interm_key[t*64*NBKEYS+i*64*SIMD_COEF_32+32*SIMD_COEF_32], 0, 32*SIMD_COEF_32);\n\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\tunsigned int sum20;\n\t\t\t// note, without the union (just type casting to uint32_t*) was causing weird problems\n\t\t\t// compiling for ppc64 (BE). This was seen for other things, where typecasting caused\n\t\t\t// problems. Using a union, solved the problem fully.\n\t\t\tunion {\n\t\t\t\tunsigned char temp_key[BINARY_SIZE*2];\n\t\t\t\tuint32_t temp_keyw[BINARY_SIZE/2];\n\t\t\t} x;\n\t\t\tuint32_t destArray[TEMP_ARRAY_SIZE / 4];\n\t\t\tconst unsigned int *sw;\n\t\t\tunsigned int *dw;\n\n\t\t\t// Temporary flat copy of crypt\n\n\t\t\tsw = (unsigned int*)&crypt_key[GETOUTPOS(0, ti)];\n\t\t\tfor (i = 0; i < 4; i++, sw += SIMD_COEF_32)\n#if ARCH_LITTLE_ENDIAN\n\t\t\t\tx.temp_keyw[i] = *sw;\n#else\n\t\t\t\tx.temp_keyw[i] = JOHNSWAP(*sw);\n\n\n\t\t\t//now: walld0rf-magic [tm], (c), \n\t\t\tsum20 = walld0rf_magic(ti, x.temp_key, (unsigned char*)destArray);\n\n\t\t\t// Vectorize a word at a time\n#if ARCH_LITTLE_ENDIAN\n\t\t\tdw = (unsigned int*)&interm_key[GETPOS(0, ti)];\n\t\t\tfor (i = 0;i <= sum20; i += 4, dw += SIMD_COEF_32)\n\t\t\t\t*dw = destArray[i >> 2];\n#else\n\t\t\tdw = (unsigned int*)&interm_key[GETPOS(3, ti)];\n\t\t\tfor (i = 0;i <= sum20; i += 4, dw += SIMD_COEF_32)\n\t\t\t\t*dw = JOHNSWAP(destArray[i >> 2]);\n\n\n\t\t\t((unsigned int *)interm_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = sum20 << 3;\n\t\t}\n\n\t\tSIMDmd5body(&interm_key[t*NBKEYS*64],\n\t\t (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);\n\n\t\tfor (index = 0; index < NBKEYS; index++) {\n\t\t\t*(uint32_t*)&crypt_key[GETOUTPOS(0, ti)] ^= *(uint32_t*)&crypt_key[GETOUTPOS(8, ti)];\n\t\t\t*(uint32_t*)&crypt_key[GETOUTPOS(4, ti)] ^= *(uint32_t*)&crypt_key[GETOUTPOS(12, ti)];\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sapB_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "TOUTPOS(4, ti)] ^= *(uint32_t*)&crypt_key[GETOUTPOS(12, ti)];\n\t\t}\n\t}\n\n#else\n\n#ifdef _OPENMP\n\tint t;\nfor (t = 0; t < count; t++)\n#else\n#define t 0\n\n\t{\n\t\tunsigned char temp_key[BINARY_SIZE*2];\n\t\tunsigned char final_key[BINARY_SIZE*2];\n\t\tunsigned int i;\n\t\tunsigned int sum20;\n\t\tunsigned char destArray[TEMP_ARRAY_SIZE];\n\t\tMD5_CTX ctx;\n\n\t\tif (keyLen[t] < 0) {\n\t\t\tkeyLen[t] = strlen(saved_plain[t]);\n\n\t\t\t// Back-out of trailing spaces\n\t\t\twhile ( keyLen[t] && saved_plain[t][keyLen[t] - 1] == ' ' )\n\t\t\t{\n\t\t\t\tif (keyLen[t] == 0) break;\n\t\t\t\tsaved_plain[t][--keyLen[t]] = 0;\n\t\t\t}\n\n\t\t\tfor (i = 0; i < keyLen[t]; i++)\n\t\t\t\tsaved_key[t][i] = transtable[ARCH_INDEX(saved_plain[t][i])];\n\t\t}\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[t], keyLen[t]);\n\t\tMD5_Update(&ctx, cur_salt->s, cur_salt->l);\n\t\tMD5_Final(temp_key,&ctx);\n\n\t\t//now: walld0rf-magic [tm], (c), \n\t\tsum20 = walld0rf_magic(t, temp_key, destArray);\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, destArray, sum20);\n\t\tMD5_Final(final_key, &ctx);\n\n\t\tfor (i = 0; i < 8; i++)\n\t\t\t((char*)crypt_key[t])[i] = final_key[i + 8] ^ final_key[i];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/lastpass_cli_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": 
"ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tuint32_t key[MIN_KEYS_PER_CRYPT][8];\n\t\tint i;\n\n\t\tif (cur_salt->iterations != 1) {\n#ifdef SIMD_COEF_32\n\t\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT];\n\t\t\tunion {\n\t\t\t\tuint32_t *pout[MIN_KEYS_PER_CRYPT];\n\t\t\t\tunsigned char *poutc;\n\t\t\t} x;\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\t\tx.pout[i] = key[i];\n\t\t\t}\n\t\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, &(x.poutc), 32, 0);\n\n#else\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tpbkdf2_sha256((unsigned char*)saved_key[i+index], strlen(saved_key[i+index]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, (unsigned char*)key[i], 32, 0);\n\t\t\t}\n\n\t\t} else {\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t\tSHA256_CTX ctx;\n\n\t\t\t\tSHA256_Init(&ctx);\n\t\t\t\tSHA256_Update(&ctx, cur_salt->salt, cur_salt->salt_length);\n\t\t\t\tSHA256_Update(&ctx, saved_key[i+index], strlen(saved_key[i+index]));\n\t\t\t\tSHA256_Final((unsigned char*)key[i], &ctx);\n\t\t\t}\n\t\t}\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tunsigned char iv[16];\n\t\t\tAES_KEY akey;\n\n\t\t\tmemcpy(iv, cur_salt->iv, 16);\n\t\t\tAES_set_encrypt_key((unsigned char*)key[i], 256, &akey);\n\t\t\tAES_cbc_encrypt((const unsigned char*)AGENT_VERIFICATION_STRING, (unsigned char*)crypt_out[i+index], BINARY_SIZE, &akey, iv, AES_ENCRYPT);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/EPI_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(i) shared(global_salt, saved_key, key_len, crypt_out)", "context_chars": 100, "text": "crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i=0;\n#ifdef _OPENMP\nfor (i = 0; i < count; ++i) {\n\t\tSHA_CTX ctx;\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, (unsigned char*)global_salt, SALT_LENGTH-1);\n\t\tSHA1_Update(&ctx, saved_key[i], key_len[i]);\n\t\tSHA1_Final((unsigned char*)crypt_out[i], &ctx);\n\t} #pragma omp parallel for private(i) shared(global_salt, saved_key, key_len, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/gost_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tgost_ctx ctx;\n\n\t\tif (is_cryptopro)\n\t\t\tjohn_gost_cryptopro_init(&ctx);\n\t\telse\n\t\t\tjohn_gost_init(&ctx);\n\t\tjohn_gost_update(&ctx, (const unsigned char*)saved_key[index],\n\t\t\t strlen(saved_key[index]));\n\n\t\tjohn_gost_final(&ctx, (unsigned char *)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/lotus85_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = *pcount;\n\tint index = 0;\n\n\t/* Compute digest for all given plaintext passwords */\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t{\n\t\tunsigned char user_key[8], 
deciphered_userid[LOTUS85_MAX_BLOB_SIZE];\n\t\tmemset(lotus85_last_binary_hash1[index], 0, BINARY_LENGTH);\n\t\tmemset(lotus85_last_binary_hash2[index], 0, BINARY_LENGTH);\n\t\tmemset(user_key, 0, sizeof(user_key));\n\t\tmemset(deciphered_userid, 0, sizeof(deciphered_userid));\n\n\t\t/* Derive password and retrieve RC2 key */\n\t\tget_user_id_secret_key(lotus85_saved_passwords[index], user_key);\n\n\t\t/* Deciphered user blob stored in user.id file */\n\t\tdecipher_userid_blob(cur_salt->lotus85_user_blob, cur_salt->lotus85_user_blob_len, user_key, deciphered_userid);\n\n\t\t/* Store first deciphered digest */\n\t\tmemcpy(lotus85_last_binary_hash1[index], deciphered_userid + cur_salt->lotus85_user_blob_len - BINARY_LENGTH, BINARY_LENGTH);\n\n\t\t/* Compute digest of deciphered message */\n\t\tcompute_msg_mac(deciphered_userid, cur_salt->lotus85_user_blob_len - BINARY_LENGTH, lotus85_last_binary_hash2[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/MD5_std.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(t) shared(n, salt_changed, saved_salt)", "context_chars": 100, "text": "crypt(int count)\n{\n#if MD5_std_mt\n\tint t, n = (count + (MD5_N - 1)) / MD5_N;\n#endif\n\n#ifdef _OPENMP\nfor_each_t(n) {\n/*\n * We could move the salt_changed check out of the parallel region (and have\n * two specialized parallel regions instead), but MD5_std_crypt_for_thread()\n * does so much work that the salt_changed check is negligible.\n */\n\t\tif (salt_changed)\n\t\t\tMD5_std_set_salt_for_thread(t, saved_salt);\n\t\tMD5_std_crypt_for_thread(t);\n\t} #pragma omp parallel for default(none) private(t) shared(n, salt_changed, saved_salt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/FGT_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)", "context_chars": 100, "text": ", struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint i=0;\n\tchar *cp=FORTINET_MAGIC;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tSHA_CTX ctx;\n\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n\n\t\tSHA1_Update(&ctx, saved_key[i], saved_key_len[i]);\n\t\tSHA1_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);\n\t\tSHA1_Final((unsigned char*)crypt_key[i], &ctx);\n\t} #pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/oldoffice_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tint i;\n\n\t\tif (oo_cur_salt->type < 3) {\n\t\t\tMD5_CTX ctx;\n\t\t\tunsigned char hashBuf[21 * 16];\n\t\t\tunsigned char key_hash[16];\n\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, saved_key[index], saved_len[index]);\n\t\t\tMD5_Final(key_hash, &ctx);\n\t\t\tfor (i = 0; i < 16; i++) {\n\t\t\t\tmemcpy(hashBuf + i * 21, key_hash, 5);\n\t\t\t\tmemcpy(hashBuf + i * 21 + 5, oo_cur_salt->salt, 16);\n\t\t\t}\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, hashBuf, 21 * 16);\n\t\t\tMD5_Final(mitm_key[index], &ctx);\n\t\t\tmemset(&mitm_key[index][5], 0, 11); // Truncate to 40 bits\n\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, mitm_key[index], 9);\n\t\t\tMD5_Final(rc4_key[index], 
&ctx);\n\t\t}\n\t\telse {\n\t\t\tSHA_CTX ctx;\n\t\t\tunsigned char H0[24];\n\t\t\tunsigned char key_hash[20];\n\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, oo_cur_salt->salt, 16);\n\t\t\tSHA1_Update(&ctx, saved_key[index], saved_len[index]);\n\t\t\tSHA1_Final(H0, &ctx);\n\t\t\tmemset(&H0[20], 0, 4);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, H0, 24);\n\t\t\tSHA1_Final(key_hash, &ctx);\n\n\t\t\tif (oo_cur_salt->type < 4) {\n\t\t\t\tmemcpy(mitm_key[index], key_hash, 5);\n\t\t\t\tmemset(&mitm_key[index][5], 0, 11); // Truncate to 40 bits\n\t\t\t} else\n\t\t\tif (oo_cur_salt->type == 5) {\n\t\t\t\tmemcpy(mitm_key[index], key_hash, 7);\n\t\t\t\tmemset(&mitm_key[index][7], 0, 9); // Truncate to 56 bits\n\t\t\t} else\n\t\t\t\tmemcpy(mitm_key[index], key_hash, 16);\n\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/oldoffice_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n\n\tif (any_cracked) {\n\t\tmemset(oo_cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tRC4_KEY key;\n\n\t\tif (oo_cur_salt->type < 3) {\n\t\t\tMD5_CTX ctx;\n\t\t\tunsigned char pwdHash[16];\n\t\t\tunsigned char hashBuf[32];\n\n\t\t\tif (cur_binary->has_mitm && memcmp(cur_binary->mitm, mitm_key[index], 5))\n\t\t\t\tcontinue;\n\n\t\t\tRC4_set_key(&key, 16, rc4_key[index]); /* rc4Key */\n\t\t\tRC4(&key, 16, cur_binary->verifier, hashBuf); /* encryptedVerifier */\n\t\t\tRC4(&key, 16, cur_binary->verifierHash, hashBuf + 16); /* encryptedVerifierHash */\n\t\t\t/* hash the decrypted verifier */\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, hashBuf, 16);\n\t\t\tMD5_Final(pwdHash, &ctx);\n\t\t\tif (!memcmp(pwdHash, hashBuf + 16, 16))\n#ifdef _OPENMP\n#pragma omp critical\n\n\t\t\t{\n\t\t\t\tany_cracked = oo_cracked[index] = 1;\n\t\t\t\tcur_binary->has_mitm = 1;\n\t\t\t\tmemcpy(cur_binary->mitm, mitm_key[index], 5);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tSHA_CTX ctx;\n\t\t\tunsigned char Hfinal[20];\n\t\t\tunsigned char DecryptedVerifier[16];\n\t\t\tunsigned char DecryptedVerifierHash[20];\n\n\t\t\tif (oo_cur_salt->type == 3 && !cur_binary->has_extra &&\n\t\t\t cur_binary->has_mitm && memcmp(cur_binary->mitm, mitm_key[index], 5))\n\t\t\t\tcontinue;\n\n\t\t\tRC4_set_key(&key, 16, mitm_key[index]); /* dek */\n\t\t\tRC4(&key, 16, cur_binary->verifier, DecryptedVerifier);\n\t\t\tRC4(&key, 16, cur_binary->verifierHash, DecryptedVerifierHash);\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, DecryptedVerifier, 16);\n\t\t\tSHA1_Final(Hfinal, &ctx);\n\t\t\tif (memcmp(Hfinal, DecryptedVerifierHash, 16))\n\t\t\t\tcontinue;\n\t\t\tif (oo_cur_salt->type == 3 && cur_binary->has_extra) {\n\t\t\t\tSHA_CTX ctx;\n\t\t\t\tunsigned char H0[24];\n\t\t\t\tunsigned char key_hash[20];\n\t\t\t\tuint8_t data[32];\n\t\t\t\tint i, num_zero = 0;\n\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, oo_cur_salt->salt, 16);\n\t\t\t\tSHA1_Update(&ctx, saved_key[index], saved_len[index]);\n\t\t\t\tSHA1_Final(H0, &ctx);\n\t\t\t\tmemcpy(&H0[20], \"\\1\\0\\0\\0\", 4);\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, H0, 24);\n\t\t\t\tSHA1_Final(key_hash, &ctx);\n\n\t\t\t\tmemset(key_hash + 40/8, 0, sizeof(key_hash) - 40/8);\n\t\t\t\tRC4_set_key(&key, 16, key_hash);\n\t\t\t\tRC4(&key, 32, cur_binary->extra, data);\n\t\t\t\tfor (i = 0; i < 32; i++)\n\t\t\t\t\tif (data[i] == 0)\n\t\t\t\t\t\tnum_zero++;\n\t\t\t\tif (num_zero < 10)\n\t\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t/* 
If we got here, looks like we have a candidate */\n#ifdef _OPENMP\n#pragma omp critical\n\n\t\t\t{\n\t\t\t\tany_cracked = oo_cracked[index] = 1;\n\t\t\t\tif (oo_cur_salt->type < 4) {\n\t\t\t\t\tcur_binary->has_mitm = 1;\n\t\t\t\t\tmemcpy(cur_binary->mitm, mitm_key[index], 5);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/rawMD5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t;\n\tint index;\n\n\tint loops = (count + MIN_KEYS_PER_CRYPT - 1) / MIN_KEYS_PER_CRYPT;\n\n#ifdef _OPENMP\nfor (index = 0; index < loops; index++) {\n#if SIMD_COEF_32\n\t\tSIMDmd5body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);\n#else\n\t\tMD5_CTX ctx;\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tMD5_Final((unsigned char *)crypt_key[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/nsec3_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "ic int crypt_all(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint i;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; ++i) {\n\t\tuint16_t iterations = saved_salt.iterations;\n\t\tSHA_CTX ctx;\n\n\t\tSHA1_Init(&ctx);\n\t\tif (saved_key_length[i] > 0) {\n\t\t\tunsigned char label_wf[PLAINTEXT_LENGTH + 2];\n\t\t\tlabels_to_wireformat(saved_key[i],\n\t\t\t saved_key_length[i],\n\t\t\t label_wf);\n\t\t\tSHA1_Update(&ctx, label_wf, saved_key_length[i] + 1);\n\t\t}\n/* Minor optimization potential: the above can be performed in set_key() */\n/* Major optimization potential: use SIMD */\n\t\tSHA1_Update(&ctx, saved_salt.zone_wf, saved_salt.zone_wf_length);\n\t\tSHA1_Update(&ctx, saved_salt.salt, saved_salt.salt_length);\n\t\tSHA1_Final((unsigned char *)crypt_out[i], &ctx);\n\t\twhile (iterations--) {\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, crypt_out[i], BINARY_SIZE);\n\t\t\tSHA1_Update(&ctx, saved_salt.salt, saved_salt.salt_length);\n\t\t\tSHA1_Final((unsigned char *)crypt_out[i], &ctx);\n\t\t}\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/MSCHAPv2_bs_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i;\n\n\tif (!keys_prepared) {\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tint len;\n\n\t\t\t/* Generate 16-byte NTLM hash */\n\t\t\tlen = E_md4hash((uchar *) saved_plain[i], saved_len[i],\n\t\t\t saved_key[i]);\n\n\t\t\tif (len <= 0)\n\t\t\t\tsaved_plain[i][-len] = 0; // match truncation\n\n\t\t\t/* NULL-padding the 16-byte hash to 21-bytes is made\n\t\t\t in cmp_exact if needed */\n\n\t\t\tsetup_des_key(saved_key[i], i);\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ripemd_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_160(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_ripemd160_context ctx;\n\n\t\tsph_ripemd160_init(&ctx);\n\t\tsph_ripemd160(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_ripemd160_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/ripemd_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_128(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_ripemd128_context ctx;\n\n\t\tsph_ripemd128_init(&ctx);\n\t\tsph_ripemd128(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_ripemd128_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5pa-sha1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "unt = *pcount;\n\tconst int key_size = (cur_salt->etype == 17) ? 16 : 32;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char tkey[MIN_KEYS_PER_CRYPT][32];\n\t\tunsigned char base_key[32];\n\t\tunsigned char Ke[32];\n\t\tunsigned char plaintext[TIMESTAMP_SIZE];\n\t\tint i;\n\t\tint len[MIN_KEYS_PER_CRYPT];\n#ifdef SIMD_COEF_32\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = tkey[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,strlen((char*)cur_salt->salt), 4096, pout, key_size, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[index+i]);\n\t\t}\n\t\tpbkdf2_sha1((const unsigned char*)saved_key[index], len[0],\n\t\t cur_salt->salt,strlen((char*)cur_salt->salt),\n\t\t 4096, tkey[0], key_size, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t// generate 128 bits from 40 bits of \"kerberos\" string\n\t\t\t// This is precomputed in init()\n\t\t\t//nfold(8 * 8, (unsigned char*)\"kerberos\", 128, constant);\n\n\t\t\tdk(base_key, tkey[i], key_size, constant, 16);\n\n\t\t\t/* The \"well-known constant\" used for the DK function is the key usage number,\n\t\t\t * expressed as four octets in big-endian order, followed by one octet indicated below.\n\t\t\t * Kc = DK(base-key, usage | 0x99);\n\t\t\t * Ke = DK(base-key, usage | 0xAA);\n\t\t\t * Ki = DK(base-key, usage | 0x55); */\n\n\t\t\t// derive Ke for decryption/encryption\n\t\t\t// This is precomputed in init()\n\t\t\t//memset(usage, 0, sizeof(usage));\n\t\t\t//usage[3] = 0x01; // key number in big-endian format\n\t\t\t//usage[4] = 0xAA; // used to derive Ke\n\n\t\t\t//nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input);\n\t\t\tdk(Ke, base_key, key_size, ke_input, 16);\n\n\t\t\t// decrypt the AS-REQ timestamp encrypted with 256-bit AES\n\t\t\t// here is enough to check the string, further computation below is required\n\t\t\t// to fully verify the checksum\n\t\t\tkrb_decrypt(cur_salt->ct, TIMESTAMP_SIZE, plaintext, Ke, key_size);\n\n\t\t\t// Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and\n\t\t\t// bail out if we are out of luck.\n\t\t\tif (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') {\n\t\t\t\tunsigned char Ki[32];\n\t\t\t\tunsigned char checksum[20];\n\t\t\t\t// derive Ki used in HMAC-SHA-1 checksum\n\t\t\t\t// This is precomputed in init()\n\t\t\t\t//memset(usage, 0, sizeof(usage));\n\t\t\t\t//usage[3] = 0x01; // key number in big-endian format\n\t\t\t\t//usage[4] = 0x55; // used to derive 
Ki\n\t\t\t\t//nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input);\n\t\t\t\tdk(Ki, base_key, key_size, ki_input, 16);\n\t\t\t\t// derive checksum of plaintext\n\t\t\t\thmac_sha1(Ki, key_size, plaintext, TIMESTAMP_SIZE, checksum, 20);\n\t\t\t\tmemcpy(crypt_out[index+i], checksum, BINARY_SIZE);\n\t\t\t} else {\n\t\t\t\tmemset(crypt_out[index+i], 0, BINARY_SIZE);\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sha3_512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tKeccak_HashInstance hash;\n\t\tKeccak_HashInitialize(&hash, 576, 1024, 512, 0x06);\n\t\tKeccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);\n\t\tKeccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/clipperz_srp_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t crypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint j;\n#ifdef _OPENMP\nfor (j = 0; j < count; ++j) {\n\t\tSHA256_CTX ctx;\n\t\tunsigned char Tmp[32];\n\t\tunsigned char TmpHex[64];\n\n\t\tmemset(crypt_out[j], 0, sizeof(crypt_out[j]));\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, saved_key[j], strlen(saved_key[j]));\n\t\tSHA256_Update(&ctx, cur_salt->user_id, strlen((char*)cur_salt->user_id));\n\t\tSHA256_Final(Tmp, &ctx);\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, Tmp, 32);\n\t\tSHA256_Final(Tmp, &ctx);\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, cur_salt->saved_salt, strlen((char*)cur_salt->saved_salt));\n\t\thex_encode(Tmp, 32, TmpHex);\n\t\tSHA256_Update(&ctx, TmpHex, 64);\n\t\tSHA256_Final(Tmp, &ctx);\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, Tmp, 32);\n\t\tSHA256_Final(Tmp, &ctx);\n\n#ifdef HAVE_LIBGMP\n\t{\n\t\tunsigned char HashStr[80], *p;\n\t\tint i, todo;\n\t\tp = HashStr;\n\t\tfor (i = 0; i < 32; ++i) {\n\t\t\t*p++ = itoa16[Tmp[i]>>4];\n\t\t\t*p++ = itoa16[Tmp[i]&0xF];\n\t\t}\n\t\t*p = 0;\n\n\t\tmpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);\n\t\tmpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );\n\t\tmpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);\n\n\t\tp = HashStr;\n\t\ttodo = strlen((char*)p);\n\t\tif (todo&1) {\n\t\t\t((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];\n\t\t\t++p;\n\t\t\t--todo;\n\t\t} else {\n\t\t\t((unsigned char*)(crypt_out[j]))[0] =\n\t\t\t\t(atoi16[ARCH_INDEX(*p)] << 4) |\n\t\t\t\tatoi16[ARCH_INDEX(p[1])];\n\t\t\tp += 2;\n\t\t\ttodo -= 2;\n\t\t}\n\t\ttodo >>= 1;\n\t\tfor (i = 1; i <= todo; i++) {\n\t\t\t((unsigned char*)(crypt_out[j]))[i] =\n\t\t\t\t(atoi16[ARCH_INDEX(*p)] << 4) |\n\t\t\t\tatoi16[ARCH_INDEX(p[1])];\n\t\t\tp += 2;\n\t\t}\n\t}\n#else\n\t\t// using oSSL's BN to do expmod.\n\t\tpSRP_CTX[j].z_exp = BN_bin2bn(Tmp,32,pSRP_CTX[j].z_exp);\n\t\tBN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);\n\t\tBN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sspr_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, 
"text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tuint32_t c;\n\t\tSHA_CTX ctx;\n\t\tSHA256_CTX sctx;\n\t\tSHA512_CTX sctx2;\n\t\tMD5_CTX mctx;\n\t\tunsigned char buf[64];\n\n\t\tif (cur_salt->fmt == 0) {\n\t\t\tMD5_Init(&mctx);\n\t\t\tMD5_Update(&mctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\t\tMD5_Final(buf, &mctx);\n\t\t\tfor (c = 1; c < cur_salt->iterations; c++) {\n\t\t\t\tMD5_Init(&mctx);\n\t\t\t\tMD5_Update(&mctx, buf, 16);\n\t\t\t\tMD5_Final(buf, &mctx);\n\t\t\t}\n\t\t} else if (cur_salt->fmt == 1) {\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA1_Final(buf, &ctx);\n\t\t\tfor (c = 1; c < cur_salt->iterations; c++) {\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, buf, 20);\n\t\t\t\tSHA1_Final(buf, &ctx);\n\t\t\t}\n\t\t} else if (cur_salt->fmt == 2) {\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, cur_salt->salt, cur_salt->saltlen);\n\t\t\tSHA1_Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA1_Final(buf, &ctx);\n\t\t\tfor (c = 1; c < cur_salt->iterations; c++) {\n\t\t\t\tSHA1_Init(&ctx);\n\t\t\t\tSHA1_Update(&ctx, buf, 20);\n\t\t\t\tSHA1_Final(buf, &ctx);\n\t\t\t}\n\t\t} else if (cur_salt->fmt == 3) {\n\t\t\tSHA256_Init(&sctx);\n\t\t\tSHA256_Update(&sctx, cur_salt->salt, cur_salt->saltlen);\n\t\t\tSHA256_Update(&sctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA256_Final(buf, &sctx);\n\t\t\tfor (c = 1; c < cur_salt->iterations; c++) {\n\t\t\t\tSHA256_Init(&sctx);\n\t\t\t\tSHA256_Update(&sctx, buf, 32);\n\t\t\t\tSHA256_Final(buf, &sctx);\n\t\t\t}\n\t\t} else if (cur_salt->fmt == 4) {\n\t\t\tSHA512_Init(&sctx2);\n\t\t\tSHA512_Update(&sctx2, cur_salt->salt, cur_salt->saltlen);\n\t\t\tSHA512_Update(&sctx2, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA512_Final(buf, &sctx2);\n\t\t\tfor (c = 1; c < cur_salt->iterations; c++) {\n\t\t\t\tSHA512_Init(&sctx2);\n\t\t\t\tSHA512_Update(&sctx2, buf, 64);\n\t\t\t\tSHA512_Final(buf, &sctx2);\n\t\t\t}\n\t\t}\n\t\tmemcpy(crypt_out[index], buf, BINARY_SIZE_MIN);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mysql_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, binary, crypt_key, retval)", "context_chars": 100, "text": "ndex][0];\n}\n\nstatic int cmp_all(void* binary, int count)\n{\n\tint i;\n\n#ifdef _OPENMP\n\tint retval = 0;\nfor (i = 0; i < count; i++)\n\t\tif (*(uint32_t *)binary == crypt_key[i][0])\n#pragma omp atomic\n\t\t\tretval |= 1;\n\treturn retval;\n#else\n\tfor (i = 0; i < count; i++)\n\t\tif (*(uint32_t *)binary == crypt_key[i][0])\n\t\t\treturn 1;\n\treturn 0;\n\n}\n\nstatic int cmp_exact(char* source, int index)\n{\n\tuint32_t *binary = get_binary_size(source, 8);\n\tregister uint32_t nr = 1345345333, add = 7, nr2 = 0x12345671;\n\tregister uint32_t tmp;\n\tunsigned char *p;\n\n\tp = (unsigned char *)saved_key[index];\n\tfor (; *p; p++) {\n\t\tif (*p == ' ' || *p == '\\t')\n\t\t\tcontinue;\n\n\t\ttmp = (uint32_t)*p;\n\t\tnr ^= (((nr & 63) + add) * tmp) + (nr << 8);\n\t\tnr2 += (nr2 << 8) ^ nr;\n\t\tadd += tmp;\n\t}\n\n\treturn\n\t\tbinary[0] == (nr & (((uint32_t)1 << 31) - 1)) &&\n\t\tbinary[1] == (nr2 & (((uint32_t)1 << 31) - 1));\n} #pragma omp parallel for default(none) 
private(i) shared(count, binary, crypt_key, retval)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mysql_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key)", "context_chars": 100, "text": "ic int crypt_all(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint i;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\tunsigned char *p = (unsigned char *)saved_key[i];\n\n\t\tif (*p) {\n\t\t\tuint32_t nr, add;\n\t\t\tuint32_t tmp;\n\t\t\twhile (*p == ' ' || *p == '\\t')\n\t\t\t\tp++;\n\t\t\ttmp = (uint32_t) (unsigned char) *p++;\n\t\t\tnr = 1345345333 ^ ((((1345345333 & 63) + 7) * tmp) + (1345345333U << 8));\n\t\t\tadd = 7 + tmp;\n\t\t\tfor (; *p; p++) {\n\t\t\t\tif (*p == ' ' || *p == '\\t')\n\t\t\t\t\tcontinue;\n\t\t\t\ttmp = (uint32_t) (unsigned char) *p;\n\t\t\t\tnr ^= (((nr & 63) + add) * tmp) + (nr << 8);\n\t\t\t\tadd += tmp;\n\t\t\t}\n\t\t\tcrypt_key[i][0] = (nr & (((uint32_t)1 << 31) - 1));\n\t\t\tcontinue;\n\t\t}\n\t\tcrypt_key[i][0] = (1345345333 & (((uint32_t)1 << 31) - 1));\n\t} #pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/keplr_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\tint failed = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tuint8_t key[32];\n\t\tint len = strlen(saved_key[index]);\n\n#ifdef _OPENMP\n\t\tif (cracked[index]) /* avoid false sharing of nearby elements */\n\n\t\t\tcracked[index] = 0;\n\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\t\t// Scrypt part\n\t\tyescrypt_params_t params = { .N = 131072, .r = 8, .p = 1 };\n\t\tif (yescrypt_kdf(NULL, &local[t],\n\t\t\t(const uint8_t *)saved_key[index], len,\n\t\t\t(const uint8_t *)cur_salt->salt, 32,\n\t\t\t&params, key, 32)) {\n\t\t\tfailed = errno ? 
errno : EINVAL;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\t\t// Sha256 part\n\t\tSHA256_CTX ctx;\n\t\tSHA256_Init(&ctx);\n\t\tSHA256_Update(&ctx, key + 16, 16);\n\t\tSHA256_Update(&ctx, cur_salt->ciphertext, cur_salt->ciphertext_size);\n\t\tSHA256_Final(key, &ctx);\n\n\t\t// Comparison part\n\t\tif (!memcmp(key, cur_salt->mac, 32))\n\t\t\tcracked[index] = 1;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/bitwarden_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 32, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i)\n\t\t\tpbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 32, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (bitwarden_decrypt(cur_salt, master[i])) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/SybasePROP_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tgenerate_hash((unsigned char*)saved_key[index], saved_salt,\n\t\t (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/o10glogon_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ORACLE\n\t\tdump_stuff_msg(\"cur_salt \", buf, cur_salt->userlen+key_length);\n#endif\n\n#ifdef _OPENMP\nfor (idx = 0; idx < count; idx++) {\n\t\tunsigned char buf[256], buf1[256];\n\t\tunsigned int l;\n\t\tuint32_t iv[2];\n\t\tDES_key_schedule desschedule2;\n\n\t\tl = cur_salt->userlen + cur_key_len[idx];\n\t\tmemcpy(buf, cur_salt->user, cur_salt->userlen);\n\t\tmemcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);\n\n\t\tiv[0] = iv[1] = 0;\n\t\tDES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);\n\t\tDES_set_key_unchecked((DES_cblock *)iv, &desschedule2);\n\t\tiv[0] = iv[1] = 0;\n\t\tDES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);\n\n#ifdef DEBUG_ORACLE\n\t\tdump_stuff_msg(\" iv (the hash key) \", (unsigned char*)&iv[0], 8);\n\n\n\t\tORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);\n\t\tif (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))\n\t\t{\n\t\t\tcracked[idx] = 1;\n#ifdef _OPENMP\n#pragma omp 
atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/NETNTLM_bs_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint i;\n\n\tif (!keys_prepared) {\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tint len;\n\n\t\t\t/* Generate 16-byte NTLM hash */\n\t\t\tlen = E_md4hash((uchar *) saved_plain[i], saved_len[i],\n\t\t\t saved_key[i]);\n\n\t\t\tif (len <= 0)\n\t\t\t\tsaved_plain[i][-len] = 0; // match truncation\n\n\t\t\t/* NULL-padding the 16-byte hash to 21-bytes is made\n\t\t\t in cmp_exact if needed */\n\n\t\t\tsetup_des_key(saved_key[i], i);\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/krb5_tgsrep_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT)\n\t{\n\t\tunsigned char tkey[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n\n#ifdef SIMD_COEF_32\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = tkey[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, (unsigned char*)cur_salt->salt, strlen(cur_salt->salt), 4096, pout, key_size, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tpbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index+i]),\n\t\t\t (unsigned char*)cur_salt->salt, strlen(cur_salt->salt),\n\t\t\t 4096, tkey[i], key_size, 0);\n\t\t}\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tunsigned char Ki[32];\n\t\t\tunsigned char plaintext[cur_salt->edata2len];\n\t\t\tunsigned char checksum[20];\n\t\t\tunsigned char base_key[32];\n\t\t\tunsigned char Ke[32];\n\n\t\t\tdk(base_key, tkey[i], key_size, constant, 16);\n\t\t\tdk(Ke, base_key, key_size, ke_input, 16);\n\t\t\tkrb_decrypt(cur_salt->edata2, cur_salt->edata2len, plaintext, Ke, key_size);\n\t\t\t// derive checksum of plaintext\n\t\t\tdk(Ki, base_key, key_size, ki_input, 16);\n\t\t\thmac_sha1(Ki, key_size, plaintext, cur_salt->edata2len, checksum, 20);\n\n\t\t\tif (!memcmp(checksum, cur_salt->edata1, 12)) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t}\n\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/racf_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (dirty) {\n\t\t\tDES_cblock des_key;\n\t\t\tint i;\n\n\t\t\t/* process key */\n\t\t\tfor (i = 0; saved_key[index][i]; i++)\n\t\t\t\tdes_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];\n\n\t\t\t/* replace missing characters in userid by (EBCDIC space (0x40) XOR 0x55) << 1 */\n\t\t\twhile(i < 8)\n\t\t\t\tdes_key[i++] = 0x2a;\n\n\t\t\tDES_set_key_unchecked(&des_key, &schedules[index]);\n\t\t}\n\t\t/* do encryption */\n\t\tDES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, 
(DES_cblock*)crypt_out[index], &schedules[index], DES_ENCRYPT);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/cloudkeychain_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA512\n\t\tint lens[SSE_GROUP_SZ_SHA512], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA512];\n\t\tuint64_t key[SSE_GROUP_SZ_SHA512][8];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA512];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = (uint32_t*)(key[i]);\n\t\t}\n\t\tpbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0);\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA512; ++i)\n\t\t\tcracked[index+i] = ckcdecrypt((unsigned char*)(key[i]));\n#else\n\t\tuint64_t key[8];\n\t\tpbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),\n\t\t\tcur_salt->salt, cur_salt->saltlen,\n\t\t\tcur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0);\n\t\tcracked[index] = ckcdecrypt((unsigned char*)key);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/androidbackup_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char pkey[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = pkey[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->user_salt, cur_salt->user_salt_length, cur_salt->iterations, pout, 32, 0);\n#else\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tpbkdf2_sha1((unsigned char *)saved_key[index+i],\n\t\t\t\t\tstrlen(saved_key[index+i]),\n\t\t\t\t\tcur_salt->user_salt, cur_salt->user_salt_length, cur_salt->iterations,\n\t\t\t\t\tpkey[i], 32, 0);\n\t\t}\n\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++) {\n\t\t\tif (check_password(pkey[i], cur_salt)) {\n\t\t\t\tcracked[index+i] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\t\tany_cracked |= 1;\n\t\t\t} else {\n\t\t\t\tcracked[index+i] = 0;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/net_md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ur_salt->magic != MAGIC) {\n\t\treturn pDynamicFmt->methods.crypt_all(pcount, salt);\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t{\n\t\tMD5_CTX ctx;\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, cur_salt->salt, cur_salt->length);\n\t\tMD5_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/vdi_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " crypt_all(int *pcount, struct db_salt *salt)\n{\n\tint i;\n\tconst int count = *pcount;\n\n#ifdef _OPENMP\nfor (i = 0; i < count; i += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char key[MAX_KEY_LEN];\n#if SSE_GROUP_SZ_SHA256\n\t\tunsigned char Keys[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];\n\t\tunsigned char Decr[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];\n#else\n\t\tunsigned char Decr[1][MAX_KEY_LEN];\n\t\tint ksz = strlen((char *)key_buffer[i]);\n\n\t\tint j;\n\n#if SSE_GROUP_SZ_SHA256\n\t\tint lens[SSE_GROUP_SZ_SHA256];\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA256];\n\t\tunion {\n\t\t\tunsigned char *pout[SSE_GROUP_SZ_SHA256];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {\n\t\t\tlens[j] = strlen((char*)(key_buffer[i+j]));\n\t\t\tpin[j] = key_buffer[i+j];\n\t\t\tx.pout[j] = Keys[j];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt1, psalt->saltlen, psalt->rounds1, &(x.poutc), psalt->keylen, 0);\n#else\n\t\tpbkdf2_sha256((const unsigned char*)key_buffer[i], ksz, psalt->salt1, psalt->saltlen, psalt->rounds1, key, psalt->keylen, 0);\n\n\t\tfor (j = 0; j < MIN_KEYS_PER_CRYPT; ++j) {\n#if SSE_GROUP_SZ_SHA256\n\t\t\tmemcpy(key, Keys[j], sizeof(key));\n\n\t\t\t// Try to decrypt using AES\n\t\t\tAES_XTS_decrypt(key, Decr[j], psalt->encr, psalt->keylen, psalt->evp_type);\n\t\t}\n\n#if SSE_GROUP_SZ_SHA256\n\t\tfor (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {\n\t\t\tlens[j] = psalt->keylen;\n\t\t\tpin[j] = Decr[j];\n\t\t\tx.pout[j] = crypt_out[i+j];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt2, psalt->saltlen, psalt->rounds2, &(x.poutc), psalt->saltlen, 0);\n#else\n\t\tpbkdf2_sha256(Decr[0], psalt->keylen, psalt->salt2, psalt->saltlen, psalt->rounds2, crypt_out[i], psalt->saltlen, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/agilekeychain_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tunsigned char master[MAX_KEYS_PER_CRYPT][32];\n\t\tint lens[MAX_KEYS_PER_CRYPT], i;\n\t\tunsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt[0], cur_salt->saltlen[0], cur_salt->iterations[0], pout, 16, 0);\n\t\tfor (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (akcdecrypt(master[i], cur_salt->ct[0]) == 0)\n\t\t\t\tcracked[i+index] = 1;\n\t\t\telse\n\t\t\t\tcracked[i+index] = 0;\n\t\t}\n#else\n\t\tunsigned char master[32];\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index],\n\t\t strlen(saved_key[index]),\n\t\t cur_salt->salt[0], cur_salt->saltlen[0],\n\t\t cur_salt->iterations[0], master, 16, 0);\n\t\tif (akcdecrypt(master, cur_salt->ct[0]) == 0)\n\t\t\tcracked[index] = 1;\n\t\telse\n\t\t\tcracked[index] = 0;\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/stribog_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel 
for", "context_chars": 100, "text": "pt_256(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t/* GOST34112012Context ctx;\n\n\t\tGOST34112012Init(&ctx, 256);\n\t\tGOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\tGOST34112012Final(&ctx, (unsigned char*)crypt_out[index]); */\n\n\t\tGOST34112012Context ctx[2]; // alignment stuff\n\n\t\tstribog256_init((void *)ctx);\n\t\tstribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\tstribog_final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/stribog_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_512(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tGOST34112012Context ctx[2]; // alignment stuff\n\n\t\tstribog512_init((void *)ctx);\n\t\tstribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\tstribog_final((unsigned char*)crypt_out[index], &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/snefru_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_256(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsnefru_ctx ctx;;\n\n\t\trhash_snefru256_init(&ctx);\n\t\trhash_snefru_update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\trhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/snefru_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt crypt_128(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsnefru_ctx ctx;;\n\n\t\trhash_snefru128_init(&ctx);\n\t\trhash_snefru_update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));\n\t\trhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/dragonfly4_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tSHA512_CTX ctx;\n\n\t\tSHA512_Init(&ctx);\n\n\t\t/* First the password */\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\n\t\t/* Then the salt, including the $4$ magic */\n\t\tSHA512_Update(&ctx, cur_salt, salt_len);\n\n\t\tSHA512_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/radmin_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++)\n\t{\n\t\tMD5_CTX 
ctx;\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], sizeof(saved_key[index]));\n\t\tMD5_Final((unsigned char *)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/MD5_fmt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n#ifdef SIMD_PARA_MD5\n#ifdef _OPENMP\n\tint t;\nfor (t = 0; t < omp_para; t++)\n\t\tmd5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType);\n#else\n\tmd5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType);\n\n#else\n\tMD5_std_crypt(count);\n\n\treturn count;\n}\n\nstatic int cmp_all(void *binary, int count)\n{\n#ifdef SIMD_PARA_MD5\n\tunsigned int x,y;\n\n\tfor (y=0;y #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/o3logon_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ORACLE\n\t\tdump_stuff_msg(\"cur_salt \", buf, cur_salt->userlen+key_length);\n#endif\n\n#ifdef _OPENMP\nfor (idx = 0; idx < count; idx++) {\n\t\tunsigned char buf[256], buf1[256];\n\t\tunsigned int l;\n\t\tuint32_t iv[2];\n\t\tDES_key_schedule desschedule2;\n\n\t\tl = cur_salt->userlen + cur_key_len[idx];\n\t\tmemcpy(buf, cur_salt->user, cur_salt->userlen);\n\t\tmemcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);\n\n\t\tiv[0] = iv[1] = 0;\n\t\tDES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);\n\t\tDES_set_key_unchecked((DES_cblock *)iv, &desschedule2);\n\t\tiv[0] = iv[1] = 0;\n\t\tDES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);\n\n#ifdef DEBUG_ORACLE\n\t\tdump_stuff_msg(\" iv (the hash key) \", (unsigned char*)&iv[0], 8);\n\n\n\t\tORACLE_TNS_Decrypt_Password_9i ((unsigned char*)iv, cur_salt->auth_sesskey, 16, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);\n\t\tif (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))\n\t\t{\n\t\t\tcracked[idx] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/timeroast_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)", "context_chars": 100, "text": " output1x[4 * index] & PH_MASK_6; }\n\nstatic void nt_hash(int count)\n{\n\tint i;\n\n#if defined(_OPENMP)\nfor (i = 0; i < count; i++) {\n\t\tunsigned int a;\n\t\tunsigned int b;\n\t\tunsigned int c;\n\t\tunsigned int d;\n\n\t\t/* Round 1 */\n\t\ta = \t\t0xFFFFFFFF \t\t + ms_buffer1x[16 * i + 0];a = (a << 3 ) | (a >> 29);\n\t\td = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16 * i + 1];d = (d << 7 ) | (d >> 25);\n\t\tc = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16 * i + 2];c = (c << 11) | (c >> 21);\n\t\tb = INIT_B + (a ^ (c & (d ^ a))) \t + ms_buffer1x[16 * i + 3];b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16 * i + 4] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16 * i + 5] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16 * i + 6] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + ms_buffer1x[16 * i + 7] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16 
* i + 8] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16 * i + 9] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16 * i + 10] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + ms_buffer1x[16 * i + 11] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16 * i + 12] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16 * i + 13] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16 * i + 14] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16 * i + 15]*/;b = (b << 19) | (b >> 13);\n\n\t\t/* Round 2 */\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16 * i + 0] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16 * i + 4] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16 * i + 8] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + ms_buffer1x[16 * i + 12] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16 * i + 1] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16 * i + 5] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16 * i + 9] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + ms_buffer1x[16 * i + 13] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16 * i + 2] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16 * i + 6] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16 * i + 10] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + ms_buffer1x[16 * i + 14] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16 * i + 3] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16 * i + 7] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16 * i + 11] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16 * i + 15]*/+SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\t/* Round 3 */\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16 * i + 0] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16 * i + 8] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16 * i + 4] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16 * i + 12] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16 * i + 2] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16 * i + 10] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16 * i + 6] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16 * i + 14] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16 * i + 1] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16 * i + 9] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16 * i + 5] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16 * i + 13] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16 * i + 3] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16 * i + 11] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16 * i + 7] + SQRT_3; c = (c << 11) | (c >> 
21);\n\t\tb += (c ^ d ^ a) /*+ ms_buffer1x[16 * i + 15] */+ SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\tlast[4 * i + 0] = a + INIT_A;\n\t\tlast[4 * i + 1] = b + INIT_B;\n\t\tlast[4 * i + 2] = c + INIT_C;\n\t\tlast[4 * i + 3] = d + INIT_D;\n\t} #pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/timeroast_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t);\n#if !ARCH_LITTLE_ENDIAN\n\t\tswap(last, count * 4);\n#endif\n\t\tnew_key = 0;\n\t}\n\n#if defined(_OPENMP)\nfor (i = 0; i < count; i++) {\n\t\tMD5_CTX ctx;\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, &((unsigned char*)last)[16 * i], 16);\n\t\tMD5_Update(&ctx, salt_buffer, SALT_SIZE);\n\t\tMD5_Final(&((unsigned char*)output1x)[16 * i], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/tcp_md5_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, cur_salt->salt, cur_salt->length);\n\n\t\tMD5_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DOMINOSEC_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += 3) {\n\t\tint i, j;\n\n\t\tif (keys_changed) {\n\t\t\tchar *k0 = saved_key[index];\n\t\t\tchar *k1 = saved_key[index + 1];\n\t\t\tchar *k2 = saved_key[index + 2];\n\t\t\tunsigned char digest16[3][16];\n\t\t\tdomino_big_md_3((unsigned char *)k0, strlen(k0),\n\t\t\t (unsigned char *)k1, strlen(k1),\n\t\t\t (unsigned char *)k2, strlen(k2),\n\t\t\t digest16[0], digest16[1], digest16[2]);\n\n\t\t\t/* Not (++i < 16) !\n\t\t\t * Domino will do hash of first 34 bytes ignoring The Fact that now\n\t\t\t * there is a salt at a beginning of buffer. 
This means that last 5\n\t\t\t * bytes \"EEFF)\" of password digest are meaningless.\n\t\t\t */\n\n\t\t\tfor (i = 0, j = 6; i < 14; i++, j += 2) {\n\t\t\t\tconst char *hex2 = hex_table[ARCH_INDEX(digest16[0][i])];\n\t\t\t\tdigest34[index][j] = hex2[0];\n\t\t\t\tdigest34[index][j + 1] = hex2[1];\n\t\t\t\thex2 = hex_table[ARCH_INDEX(digest16[1][i])];\n\t\t\t\tdigest34[index + 1][j] = hex2[0];\n\t\t\t\tdigest34[index + 1][j + 1] = hex2[1];\n\t\t\t\thex2 = hex_table[ARCH_INDEX(digest16[2][i])];\n\t\t\t\tdigest34[index + 2][j] = hex2[0];\n\t\t\t\tdigest34[index + 2][j + 1] = hex2[1];\n\t\t\t}\n\t\t}\n\n\t\tif (salt_changed) {\n\t\t\tdigest34[index + 2][0] = digest34[index + 1][0] =\n\t\t\t digest34[index][0] = saved_salt[0];\n\t\t\tdigest34[index + 2][1] = digest34[index + 1][1] =\n\t\t\t digest34[index][1] = saved_salt[1];\n\t\t\tdigest34[index + 2][2] = digest34[index + 1][2] =\n\t\t\t digest34[index][2] = saved_salt[2];\n\t\t\tdigest34[index + 2][3] = digest34[index + 1][3] =\n\t\t\t digest34[index][3] = saved_salt[3];\n\t\t\tdigest34[index + 2][4] = digest34[index + 1][4] =\n\t\t\t digest34[index][4] = saved_salt[4];\n\t\t\tdigest34[index + 2][5] = digest34[index + 1][5] =\n\t\t\t digest34[index][5] = '(';\n\t\t}\n\n\t\tdomino_big_md_3_34(digest34[index], digest34[index + 1],\n\t\t digest34[index + 2],\n\t\t (unsigned char *)crypt_out[index],\n\t\t (unsigned char *)crypt_out[index + 1],\n\t\t (unsigned char *)crypt_out[index + 2]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/oracle_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint idx = 0;\n\n#ifdef _OPENMP\nfor (idx = 0; idx < count; idx++) {\n\t\tunsigned char buf[sizeof(cur_salt)];\n\t\tunsigned char buf2[SALT_SIZE + PLAINTEXT_LENGTH*2];\n\t\tDES_key_schedule sched_local;\n\t\tunsigned int l;\n\n\t\tl = salt_length + key_length[idx];\n\t\tmemcpy(buf2, cur_salt, salt_length);\n\t\tmemcpy(buf2 + salt_length, cur_key[idx], key_length[idx]);\n#ifdef DEBUG_ORACLE\n\t\tdump_stuff_msg(\"cur_salt \", buf2, salt_length+key_length[idx]);\n\n\t\tcrypt_key[idx][0] = 0;\n\t\tcrypt_key[idx][1] = 0;\n\n\t\tDES_ncbc_encrypt(buf2, buf, l, &desschedule_static, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);\n\t\tDES_set_key_unchecked((DES_cblock *)crypt_key[idx], &sched_local);\n\t\tcrypt_key[idx][0] = 0;\n\t\tcrypt_key[idx][1] = 0;\n\t\tDES_ncbc_encrypt(buf2, buf, l, &sched_local, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);\n\n#ifdef DEBUG_ORACLE\n\t\tdump_stuff_msg(\" crypt_key \", (unsigned char*)&crypt_key[idx][0], 8);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk.h", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, outbuffer, msg, mic)", "context_chars": 100, "text": "\"PMK Name\";\n\n\t\tmemcpy(msg + 8, hccap->mac1, 6);\n\t\tmemcpy(msg + 14, hccap->mac2, 6);\n\n#ifdef _OPENMP\n/* Create \"keymic\" that is actually PMKID */\n\t\tfor (i = 0; i < count; i++) {\n\t\t\thmac_sha1((unsigned char*)outbuffer[i].v, 32,\n\t\t\t msg, 20, mic[i].keymic, 16);\n\t\t} #pragma omp parallel for default(none) private(i) shared(count, outbuffer, msg, mic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk.h", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, 
hccap, mic)", "context_chars": 100, "text": "t_mac(hccap, data);\n\t\tinsert_nonce(hccap, data + 12);\n\t}\n\n\tif (hccap->keyver == 1) {\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tunion {\n\t\t\t\tuint32_t u32[20/4];\n\t\t\t\tunsigned char uc[20];\n\t\t\t\tuint64_t dummy; /* alignment for hmac_md5_init_K16() */\n\t\t\t} prf;\n\t\t\tHMACMD5Context ctx;\n\n\t\t\tprf_512(outbuffer[i].v, data, prf.u32); // PTK\n\t\t\thmac_md5_init_K16(prf.uc, &ctx);\n\t\t\thmac_md5_update(hccap->eapol, hccap->eapol_size, &ctx);\n\t\t\thmac_md5_final(mic[i].keymic, &ctx);\n\t\t} #pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, hccap, mic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk.h", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, hccap, mic)", "context_chars": 100, "text": "&ctx);\n\t\t\thmac_md5_final(mic[i].keymic, &ctx);\n\t\t}\n\t} else if (hccap->keyver == 2) {\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tuint32_t prf[20/4];\n\n\t\t\tprf_512(outbuffer[i].v, data, prf); // PTK\n\t\t\thmac_sha1((unsigned char*)prf, 16, hccap->eapol,\n\t\t\t hccap->eapol_size, mic[i].keymic, 16);\n\t\t} #pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, hccap, mic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/wpapsk.h", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, hccap, mic)", "context_chars": 100, "text": "#if HAVE_OPENSSL_CMAC_H\n\t} else if (hccap->keyver == 3) { // 802.11w, WPA-PSK-SHA256\n#ifdef _OPENMP\nfor (i = 0; i < count; i++) {\n\t\t\tunsigned char ptk[48];\n\t\t\tunsigned char cmic[16];\n\t\t\tsize_t miclen;\n\t\t\tCMAC_CTX *ctx;\n\n\t\t\tsha256_prf_bits((unsigned char*)outbuffer[i].v, 32, \"Pairwise key expansion\", data, 76, ptk, 48 * 8); // PTK\n\n\t\t\t// Compute MIC\n\t\t\tctx = CMAC_CTX_new();\n\t\t\tCMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0);\n\t\t\tCMAC_Update(ctx, hccap->eapol, hccap->eapol_size);\n\t\t\tCMAC_Final(ctx, cmic, &miclen);\n\t\t\tmemcpy(mic[i].keymic, cmic, 16);\n\t\t\tCMAC_CTX_free(ctx);\n\t\t} #pragma omp parallel for default(none) private(i) shared(count, outbuffer, data, hccap, mic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/hmacSHA256_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "LAGS\n#else\n\tconst int B_LEN\n#endif\n\t)\n{\n\tconst int count = *pcount;\n\tint index = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_32\n\t\tunsigned int i, *pclear;\n\n\t\tif (new_keys) {\n\t\t\tSIMDSHA256body(&ipad[index * PAD_SIZE],\n\t\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN|EX_FLAGS);\n\t\t\tSIMDSHA256body(&opad[index * PAD_SIZE],\n\t\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t\t NULL, SSEi_MIXED_IN|EX_FLAGS);\n\t\t}\n\n\t\tSIMDSHA256body(cur_salt->salt[0],\n\t\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t\t (unsigned int*)&prep_ipad[index * BINARY_SIZE],\n\t\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n\t\tfor (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++)\n\t\t\tSIMDSHA256body(cur_salt->salt[i],\n\t\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t\t 
SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n\n\t\tif (EX_FLAGS) {\n\t\t\t// NOTE, SSESHA224 will output 32 bytes. We need the first 28 (plus the 0x80 padding).\n\t\t\t// so we are forced to 'clean' this crap up, before using the crypt as the input.\n\t\t\tpclear = (unsigned int*)&crypt_key[(unsigned int)index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32*4];\n\t\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; i++)\n\t\t\t\tpclear[28/4*SIMD_COEF_32+(i&(SIMD_COEF_32-1))+i/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32] = 0x80000000;\n\t\t}\n\t\tSIMDSHA256body(&crypt_key[index * PAD_SIZE],\n\t\t (unsigned int*)&crypt_key[index * PAD_SIZE],\n\t\t (unsigned int*)&prep_opad[index * BINARY_SIZE],\n\t\t SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);\n#else\n\t\tSHA256_CTX ctx;\n\t\t// Note, for oSSL, we really only need SHA256_Init and SHA224_Init. From that point\n\t\t// on, SHA256_Update/SHA256_Final can be used. Also, jtr internal sha2.c file works\n\t\t// like that. BUT I am not sure every hash engine works that way, so we are keeping\n\t\t// the 'full' block.\n\t\tif (B_LEN == BINARY_SIZE) {\n\t\t\tif (new_keys) {\n\t\t\t\tSHA256_Init(&ipad_ctx[index]);\n\t\t\t\tSHA256_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\t\tSHA256_Init(&opad_ctx[index]);\n\t\t\t\tSHA256_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t\t}\n\n\t\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\t\tSHA256_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );\n\t\t\tSHA256_Final( (unsigned char*) crypt_key[index], &ctx);\n\n\t\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\t\tSHA256_Update( &ctx, crypt_key[index], B_LEN);\n\t\t\tSHA256_Final( (unsigned char*) crypt_key[index], &ctx);\n\t\t} else {\n\t\t\tif (new_keys) {\n\t\t\t\tSHA224_Init(&ipad_ctx[index]);\n\t\t\t\tSHA224_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);\n\t\t\t\tSHA224_Init(&opad_ctx[index]);\n\t\t\t\tSHA224_Update(&opad_ctx[index], opad[index], PAD_SIZE);\n\t\t\t}\n\n\t\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\t\tSHA224_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );\n\t\t\tSHA224_Final( (unsigned char*) crypt_key[index], &ctx);\n\n\t\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\t\tSHA224_Update( &ctx, crypt_key[index], B_LEN);\n\t\t\tSHA224_Final( (unsigned char*) crypt_key[index], &ctx);\n\t\t}\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/siemens-s7_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char buf[20];\n\t\tSHA_CTX ctx;\n\t\tif (new_keys) {\n\t\t\tunsigned char pad[20];\n\t\t\tint i;\n\n\t\t\tSHA1_Init(&ctx);\n\t\t\tSHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\t\tSHA1_Final(buf, &ctx);\n\t\t\tfor (i = 0; i < 20; ++i) {\n\t\t\t\tpad[i] = buf[i] ^ 0x36;\n\t\t\t}\n\t\t\tSHA1_Init(&ipad_ctx[index]);\n\t\t\tSHA1_Update(&ipad_ctx[index], pad, 20);\n\t\t\tSHA1_Update(&ipad_ctx[index], \"\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\\x36\", 44);\n\t\t\tfor (i = 0; i < 20; ++i) {\n\t\t\t\tpad[i] = buf[i] ^ 0x5C;\n\t\t\t}\n\t\t\tSHA1_Init(&opad_ctx[index]);\n\t\t\tSHA1_Update(&opad_ctx[index], pad, 
20);\n\t\t\tSHA1_Update(&opad_ctx[index], \"\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\\x5C\", 44);\n\t\t}\n\t\tmemcpy(&ctx, &ipad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, challenge, 20);\n\t\tSHA1_Final(buf, &ctx);\n\t\tmemcpy(&ctx, &opad_ctx[index], sizeof(ctx));\n\t\tSHA1_Update(&ctx, buf, 20);\n\t\tSHA1_Final((unsigned char*)(crypt_out[index]), &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/postgres_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tMD5_CTX ctx;\n\t\tunsigned char out[32];\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tMD5_Update(&ctx, cur_salt->user, strlen((char*)cur_salt->user));\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\n\t\thex_encode((unsigned char*)crypt_out[index], 16, out);\n\n\t\tMD5_Init(&ctx);\n\t\tMD5_Update(&ctx, out, 32);\n\t\tMD5_Update(&ctx, cur_salt->salt, 4);\n\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/haval_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " crypt_256_3(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_haval256_3_context ctx;\n\n\t\tsph_haval256_3_init(&ctx);\n\t\tsph_haval256_3(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_haval256_3_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/haval_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " crypt_128_4(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tsph_haval128_4_context ctx;\n\n\t\tsph_haval128_4_init(&ctx);\n\t\tsph_haval128_4(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tsph_haval128_4_close(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mdc2_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tJtR_MDC2_CTX ctx;\n\t\tJtR_MDC2_Init(&ctx);\n\t\tJtR_MDC2_Update(&ctx, (unsigned char*)saved_key[index], saved_len[index]);\n\t\tJtR_MDC2_Final((unsigned char*)crypt_out[index], &ctx);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/strip_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tunsigned 
char output[24];\n\t\tunsigned char *iv_in;\n\t\tunsigned char iv_out[16];\n\t\tint size,i;\n\t\tint page_sz = 1008; /* 1024 - strlen(SQLITE_FILE_HEADER) */\n\t\tint reserve_sz = 16; /* for HMAC off case */\n\t\tAES_KEY akey;\n\n#ifdef SIMD_COEF_32\n\t\tint len[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlen[i] = strlen(saved_key[i+index]);\n\t\t\tpin[i] = (unsigned char*)saved_key[i+index];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, ITERATIONS, pout, 32, 0);\n#else\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index],\n\t\t strlen(saved_key[index]), cur_salt->salt,\n\t\t 16, ITERATIONS, master[0], 32, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\t// memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ);\n\t\t\tsize = page_sz - reserve_sz;\n\t\t\tiv_in = cur_salt->data + size + 16;\n\t\t\tmemcpy(iv_out, iv_in, 16);\n\n\t\t\tAES_set_decrypt_key(master[i], 256, &akey);\n\t\t\t/*\n\t\t\t * Decrypting 8 bytes from offset 16 is enough since the\n\t\t\t * verify_page function looks at output[16..23] only.\n\t\t\t */\n\t\t\tAES_cbc_encrypt(cur_salt->data + 16, output + 16, 8, &akey, iv_out, AES_DECRYPT);\n\t\t\tif (strip_verify_page(output) == 0) {\n\t\t\t\tcracked[index+i] = 1;\n\t\t\t}\n\t\t\telse\n\t\t\t\tcracked[index+i] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/keychain_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n\t\tunsigned char master[MIN_KEYS_PER_CRYPT][32];\n\t\tint i;\n#ifdef SIMD_COEF_32\n\t\tint lens[MIN_KEYS_PER_CRYPT];\n\t\tunsigned char *pin[MIN_KEYS_PER_CRYPT], *pout[MIN_KEYS_PER_CRYPT];\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tpout[i] = master[i];\n\t\t}\n\t\tpbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, SALTLEN, 1000, pout, 24, 0);\n#else\n\t\tpbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, SALTLEN, 1000, master[0], 24, 0);\n\n\t\tfor (i = 0; i < MIN_KEYS_PER_CRYPT; ++i) {\n\t\t\tif (kcdecrypt(master[i], cur_salt->iv, cur_salt->ct) == 0)\n\t\t\t\tcracked[index+i] = 1;\n\t\t\telse\n\t\t\t\tcracked[index+i] = 0;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/opencl_electrum_modern_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " NULL, multi_profilingEvent[3]), \"Copy result back\");\n\n\tif (!ocl_autotune_running) {\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\t\tBIGNUM *p, *q, *r;\n\t\t\tBN_CTX *ctx;\n\t\t\tuint64_t u[8];\n\t\t\tunsigned char static_privkey[64];\n\t\t\tunsigned char shared_pubkey[33];\n\t\t\tunsigned char keys[128];\n\t\t\tunsigned char cmac[32];\n\t\t\tsecp256k1_context *sctx;\n\t\t\tSHA512_CTX md_ctx;\n\t\t\tint shared_pubkeylen= 33;\n\t\t\tint j;\n\n\t\t\tmemcpy(u, host_crack[index].hash, 64);\n\t\t\tfor (j = 0; j < 8; j++)\n\t\t\t\tu[j] = JOHNSWAP64(u[j]);\n\t\t\tmemcpy(static_privkey, u, 64);\n\t\t\t// do static_privkey % GROUP_ORDER\n\t\t\tp = 
BN_bin2bn(static_privkey, 64, NULL);\n\t\t\tq = BN_new();\n\t\t\tr = BN_new();\n\t\t\tBN_hex2bn(&q, group_order);\n\t\t\tctx = BN_CTX_new();\n\t\t\tBN_mod(r, p, q, ctx);\n\t\t\tBN_CTX_free(ctx);\n\t\t\tBN_free(p);\n\t\t\tBN_free(q);\n\t\t\tBN_bn2binpad32(r, static_privkey);\n\t\t\tBN_free(r);\n\t\t\tsctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);\n\t\t\t// multiply point with a scaler, shared_pubkey is compressed representation\n\t\t\tsecp256k1_mul(sctx, shared_pubkey, &cur_salt->pubkey, static_privkey);\n\t\t\tsecp256k1_context_destroy(sctx);\n\t\t\tSHA512_Init(&md_ctx);\n\t\t\tSHA512_Update(&md_ctx, shared_pubkey, shared_pubkeylen);\n\t\t\tSHA512_Final(keys, &md_ctx);\n\t\t\tif (cur_salt->type == 4) {\n\t\t\t\t// calculate mac of data\n\t\t\t\thmac_sha256(keys + 32, 32, cur_salt->data, cur_salt->datalen, cmac, 32);\n\t\t\t\tmemcpy(crypt_out[index], cmac, BINARY_SIZE);\n\t\t\t} else if (cur_salt->type == 5) {\n\t\t\t\tz_stream z;\n\t\t\t\tunsigned char iv[16];\n\t\t\t\tunsigned char out[512] = { 0 };\n\t\t\t\tunsigned char fout[512] = { 0 };\n\t\t\t\tAES_KEY aes_decrypt_key;\n\n\t\t\t\t// common zlib settings\n\t\t\t\tz.zalloc = Z_NULL;\n\t\t\t\tz.zfree = Z_NULL;\n\t\t\t\tz.opaque = Z_NULL;\n\t\t\t\tz.avail_in = 512;\n\t\t\t\tz.avail_out = 512;\n\t\t\t\tz.next_out = fout;\n\n\t\t\t\tmemcpy(iv, keys, 16);\n\t\t\t\tmemset(crypt_out[index], 0, BINARY_SIZE);\n\t\t\t\t// fast zlib based rejection test, is this totally safe?\n\t\t\t\tAES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);\n\t\t\t\tAES_cbc_encrypt(cur_salt->data, out, 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\tif ((memcmp(out, \"\\x78\\x9c\", 2) != 0) || (((out[2] & 0x7) != 0x4) && ((out[2] & 0x7) != 0x5))) {\n\t\t\t\t} else {\n\t\t\t\t\tAES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);\n\t\t\t\t\tAES_cbc_encrypt(cur_salt->data + 16, out + 16, 512 - 16, &aes_decrypt_key, iv, AES_DECRYPT);\n\t\t\t\t\tz.next_in = out;\n\t\t\t\t\tinflateInit2(&z, 15);\n\t\t\t\t\tinflate(&z, Z_NO_FLUSH);\n\t\t\t\t\tinflateEnd(&z);\n\t\t\t\t\tif ((memcmp(fout, EXPECTED_BYTES_1, 7) == 0) || (memcmp(fout, EXPECTED_BYTES_2, 8) == 0))\n\t\t\t\t\t\tmemcpy(crypt_out[index], cur_salt->mac, BINARY_SIZE); // dirty hack!\n\t\t\t\t}\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs.c", "omp_pragma_line": "#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary)", "context_chars": 100, "text": " (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;\n#endif\n\tint retval = 0;\n\n#if defined(_OPENMP) && DES_BS_VECTOR\n#elif defined(_OPENMP)\n#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary)\n\n\tfor_each_t(n)\n\tfor_each_depth() {\n\t\tvalue = binary[0];\n\t\tb = (DES_bs_vector *)&DES_bs_all.B[0] DEPTH;\n\n\t\tmask = b[0] START ^ -(value & 1);\n\t\tmask |= b[1] START ^ -((value >> 1) & 1);\n\t\tmask |= b[2] START ^ -((value >> 2) & 1);\n\t\tmask |= b[3] START ^ -((value >> 3) & 1);\n\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\tvalue >>= 4;\n\t\tb += 4;\n\t\tfor (bit = 4; bit < 32; bit += 2) {\n\t\t\tmask |= b[0] START ^\n\t\t\t\t-(value & 1);\n\t\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\t\tmask |= b[1] START ^\n\t\t\t\t-((value >> 1) & 1);\n\t\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\t\tvalue >>= 2;\n\t\t\tb += 2;\n\t\t}\n\n#ifdef _OPENMP\n\t\tretval = 1;\n#else\n\t\treturn 1;\n\nnext_depth:\n\t\t;\n\t} 
#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs.c", "omp_pragma_line": "#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary)", "context_chars": 100, "text": "ivate(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary)\n#elif defined(_OPENMP)\nfor_each_t(n)\n\tfor_each_depth() {\n\t\tvalue = binary[0];\n\t\tb = (DES_bs_vector *)&DES_bs_all.B[0] DEPTH;\n\n\t\tmask = b[0] START ^ -(value & 1);\n\t\tmask |= b[1] START ^ -((value >> 1) & 1);\n\t\tmask |= b[2] START ^ -((value >> 2) & 1);\n\t\tmask |= b[3] START ^ -((value >> 3) & 1);\n\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\tvalue >>= 4;\n\t\tb += 4;\n\t\tfor (bit = 4; bit < 32; bit += 2) {\n\t\t\tmask |= b[0] START ^\n\t\t\t\t-(value & 1);\n\t\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\t\tmask |= b[1] START ^\n\t\t\t\t-((value >> 1) & 1);\n\t\t\tif (mask == ~(ARCH_WORD)0) goto next_depth;\n\t\t\tvalue >>= 2;\n\t\t\tb += 2;\n\t\t}\n\n#ifdef _OPENMP\n\t\tretval = 1;\n#else\n\t\treturn 1;\n\nnext_depth:\n\t\t;\n\t} #pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)", "context_chars": 100, "text": " output1x[4*index+3] & PH_MASK_6; }\n\nstatic void nt_hash(int count)\n{\n\tint i;\n\n#if defined(_OPENMP)\nfor (i = 0; i < count; i++) {\n\t\tunsigned int a;\n\t\tunsigned int b;\n\t\tunsigned int c;\n\t\tunsigned int d;\n\n\t\t/* Round 1 */\n\t\ta = \t\t0xFFFFFFFF \t\t + ms_buffer1x[16*i+0];a = (a << 3 ) | (a >> 29);\n\t\td = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1];d = (d << 7 ) | (d >> 25);\n\t\tc = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16*i+2];c = (c << 11) | (c >> 21);\n\t\tb = INIT_B + (a ^ (c & (d ^ a))) \t + ms_buffer1x[16*i+3];b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/;b = (b << 19) | (b >> 13);\n\n\t\t/* Round 2 */\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | 
(d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/+SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\t/* Round 3 */\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16*i+8] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16*i+4] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16*i+6] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16*i+9] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16*i+5] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + ms_buffer1x[16*i+3] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + ms_buffer1x[16*i+7] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */+ SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\tcrypt_out[4*i+0] = a + INIT_A;\n\t\tcrypt_out[4*i+1] = b + INIT_B;\n\t\tcrypt_out[4*i+2] = c + INIT_C;\n\t\tcrypt_out[4*i+3] = d + INIT_D;\n\n\t\t//Another MD4_crypt for the salt\n\t\t/* Round 1 */\n\t\ta= \t 0xFFFFFFFF \t +crypt_out[4*i+0]; a=(a<<3 )|(a>>29);\n\t\td=INIT_D + ( INIT_C ^ ( a & 0x77777777)) +crypt_out[4*i+1]; d=(d<<7 )|(d>>25);\n\t\tc=INIT_C + ( INIT_B ^ ( d & ( a ^ INIT_B))) +crypt_out[4*i+2]; c=(c<<11)|(c>>21);\n\t\tb=INIT_B + ( a ^ ( c & ( d ^ a ))) +crypt_out[4*i+3]; b=(b<<19)|(b>>13);\n\n\t\tlast[4*i+0]=a;\n\t\tlast[4*i+1]=b;\n\t\tlast[4*i+2]=c;\n\t\tlast[4*i+3]=d;\n\t} #pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash1_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)", "context_chars": 100, "text": " count = *pcount;\n\tint 
i;\n\n\tif (new_key)\n\t{\n\t\tnew_key=0;\n\t\tnt_hash(count);\n\t}\n\n#if defined(_OPENMP)\nfor (i = 0; i < count; i++) {\n\t\tunsigned int a;\n\t\tunsigned int b;\n\t\tunsigned int c;\n\t\tunsigned int d;\n\n\t\ta = last[4*i+0];\n\t\tb = last[4*i+1];\n\t\tc = last[4*i+2];\n\t\td = last[4*i+3];\n\n\t\ta += (d ^ (b & (c ^ d))) + salt_buffer[0] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + salt_buffer[1] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + salt_buffer[2] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + salt_buffer[3] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + salt_buffer[4] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + salt_buffer[5] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + salt_buffer[6] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a))) + salt_buffer[7] ;b = (b << 19) | (b >> 13);\n\n\t\ta += (d ^ (b & (c ^ d))) + salt_buffer[8] ;a = (a << 3 ) | (a >> 29);\n\t\td += (c ^ (a & (b ^ c))) + salt_buffer[9] ;d = (d << 7 ) | (d >> 25);\n\t\tc += (b ^ (d & (a ^ b))) + salt_buffer[10] ;c = (c << 11) | (c >> 21);\n\t\tb += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/;b = (b << 19) | (b >> 13);\n\n\t\t/* Round 2 */\n\t\ta += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + salt_buffer[0] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + salt_buffer[4] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + salt_buffer[8] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + salt_buffer[1] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + salt_buffer[5] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + salt_buffer[9] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + salt_buffer[2] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + salt_buffer[6] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a)) + salt_buffer[10] + SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\ta += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);\n\t\td += ((a & (b | c)) | (b & c)) + salt_buffer[3] + SQRT_2; d = (d << 5 ) | (d >> 27);\n\t\tc += ((d & (a | b)) | (a & b)) + salt_buffer[7] + SQRT_2; c = (c << 9 ) | (c >> 23);\n\t\tb += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/+ SQRT_2; b = (b << 13) | (b >> 19);\n\n\t\t/* Round 3 */\n\t\ta += (b ^ c ^ d) + crypt_out[4*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + salt_buffer[4] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + salt_buffer[0] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + salt_buffer[8] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + crypt_out[4*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + salt_buffer[6] + SQRT_3; d = (d << 9 ) | (d >> 23);\n\t\tc += (d ^ a ^ b) + salt_buffer[2] + SQRT_3; c = (c << 11) | (c >> 21);\n\t\tb += (c ^ d ^ a) + salt_buffer[10] + SQRT_3; b = (b << 15) | (b >> 17);\n\n\t\ta += (b ^ c ^ d) + crypt_out[4*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);\n\t\td += (a ^ b ^ c) + salt_buffer[5];\n\n\t\toutput1x[4*i+0]=a;\n\t\toutput1x[4*i+1]=b;\n\t\toutput1x[4*i+2]=c;\n\t\toutput1x[4*i+3]=d;\n\t} #pragma 
omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/has160_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ypt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\thas160_ctx ctx;\n\n\t\trhash_has160_init(&ctx);\n\t\trhash_has160_update(&ctx, (unsigned char*)saved_key[index], saved_len[index]);\n\t\trhash_has160_final(&ctx, (unsigned char*)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mongodb_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (cur_salt->type == 0) {\n\t\t\tMD5_CTX ctx;\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));\n\t\t\tMD5_Update(&ctx, \":mongo:\", 7);\n\t\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t\t}\n\t\telse {\n\t\t\tunsigned char hexout[32];\n\t\t\tunsigned char out[32];\n\t\t\tMD5_CTX ctx;\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));\n\t\t\tMD5_Update(&ctx, \":mongo:\", 7);\n\t\t\tMD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\t\tMD5_Final(out, &ctx);\n\t\t\thex_encode(out, 16, hexout);\n\t\t\tMD5_Init(&ctx);\n\t\t\tMD5_Update(&ctx, cur_salt->salt, 16);\n\t\t\tMD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));\n\t\t\tMD5_Update(&ctx, hexout, 32);\n\t\t\tMD5_Final((unsigned char*)crypt_out[index], &ctx);\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/tacacs_plus_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ndex;\n\n\tif (any_cracked) {\n\t\tmemset(cracked, 0, cracked_size);\n\t\tany_cracked = 0;\n\t}\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tif (check_password(index, cur_salt)) {\n\t\t\tcracked[index] = 1;\n#ifdef _OPENMP\n#pragma omp atomic\n\n\t\t\tany_cracked |= 1;\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mysql_netauth_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n\t\tunsigned char stage1_hash[20];\n\t\tunsigned char inner_hash[20];\n\t\tunsigned char token[20];\n\t\tSHA_CTX ctx;\n\t\tint i;\n\t\tunsigned char *p = (unsigned char*)crypt_out[index];\n\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));\n\t\tSHA1_Final(stage1_hash, &ctx);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, stage1_hash, 20);\n\t\tSHA1_Final(inner_hash, &ctx);\n\t\tSHA1_Init(&ctx);\n\t\tSHA1_Update(&ctx, cur_salt->scramble, 20);\n\t\tSHA1_Update(&ctx, inner_hash, 20);\n\t\tSHA1_Final(token, &ctx);\n\t\tfor (i = 0; i < 20; i++) {\n\t\t\tp[i] = token[i] ^ stage1_hash[i];\n\t\t}\n\t} #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/scrypt_fmt.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, failed, max_threads, local, saved_salt, buffer)", "context_chars": 100, "text": "*pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\tint failed = 0;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index++) {\n#ifdef _OPENMP\n\t\tint t = omp_get_thread_num();\n\t\tif (t >= max_threads) {\n\t\t\tfailed = -1;\n\t\t\tcontinue;\n\t\t}\n#else\n\t\tconst int t = 0;\n\n\t\tuint8_t *hash;\n\t\thash = yescrypt_r(NULL, &local[t],\n\t\t (const uint8_t *)buffer[index].key,\n\t\t strlen(buffer[index].key),\n\t\t (const uint8_t *)saved_salt,\n\t\t NULL,\n\t\t (uint8_t *)buffer[index].out,\n\t\t sizeof(buffer[index].out));\n\t\tif (!hash) {\n\t\t\tfailed = errno ? errno : EINVAL;\n#ifndef _OPENMP\n\t\t\tbreak;\n\n\t\t}\n\t} #pragma omp parallel for default(none) private(index) shared(count, failed, max_threads, local, saved_salt, buffer)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out)", "context_chars": 100, "text": "nt count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\n#ifndef SIMD_COEF_64\n#ifdef PRECOMPUTE_CTX_FOR_SALT\n#else\n#pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)\n\n#else\n#pragma omp parallel for\n\n\n\tfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tSIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);\n#else\n\t\tSHA512_CTX ctx;\n#ifdef PRECOMPUTE_CTX_FOR_SALT\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n#else\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, &saved_salt, SALT_SIZE);\n\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA512_Final((unsigned char *)(crypt_out[index]), &ctx);\n\n\t} #pragma omp parallel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)", "context_chars": 100, "text": "lel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out)\n#else\n#else\n#pragma omp parallel for\n\n\n\tfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tSIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);\n#else\n\t\tSHA512_CTX ctx;\n#ifdef PRECOMPUTE_CTX_FOR_SALT\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n#else\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, &saved_salt, SALT_SIZE);\n\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA512_Final((unsigned char *)(crypt_out[index]), &ctx);\n\n\t} #pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "efault(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)\n#endif\n#else\nfor (index = 0; index < 
count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SIMD_COEF_64\n\t\tSIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);\n#else\n\t\tSHA512_CTX ctx;\n#ifdef PRECOMPUTE_CTX_FOR_SALT\n\t\tmemcpy(&ctx, &ctx_salt, sizeof(ctx));\n#else\n\t\tSHA512_Init(&ctx);\n\t\tSHA512_Update(&ctx, &saved_salt, SALT_SIZE);\n\n\t\tSHA512_Update(&ctx, saved_key[index], saved_len[index]);\n\t\tSHA512_Final((unsigned char *)(crypt_out[index]), &ctx);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2_hmac_sha256_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pt_all(int *pcount, struct db_salt *salt)\n{\n\tconst int count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\nfor (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) {\n#ifdef SSE_GROUP_SZ_SHA256\n\t\tint lens[SSE_GROUP_SZ_SHA256], i;\n\t\tunsigned char *pin[SSE_GROUP_SZ_SHA256];\n\t\tunion {\n\t\t\tuint32_t *pout[SSE_GROUP_SZ_SHA256];\n\t\t\tunsigned char *poutc;\n\t\t} x;\n\t\tfor (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {\n\t\t\tlens[i] = strlen(saved_key[index+i]);\n\t\t\tpin[i] = (unsigned char*)saved_key[index+i];\n\t\t\tx.pout[i] = crypt_out[index+i];\n\t\t}\n\t\tpbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), PBKDF2_SHA256_BINARY_SIZE, 0);\n#else\n\t\tpbkdf2_sha256((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),\n\t\t\tcur_salt->salt, cur_salt->length,\n\t\t\tcur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA256_BINARY_SIZE, 0);\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sm3_fmt_plug.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t crypt_all(int *pcount, struct db_salt *salt)\n{\n\tint count = *pcount;\n\tint index;\n\n#ifdef _OPENMP\n\tfor (index = 0; index < count; index++) {\n\t\tsm3_ctx ctx;\n\n\t\tsm3_init(&ctx);\n\t\tsm3_update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));\n\t\tsm3_final(&ctx, (unsigned char *)crypt_out[index]);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/yescrypt/yescrypt-opt.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S)", "context_chars": 100, "text": "\n\t\tsmix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, sha256);\n\t} else {\n\t\tuint32_t i;\n#ifdef _OPENMP\nfor (i = 0; i < p; i++) {\n#ifdef _OPENMP\n\t\t\tsmix(&B[(size_t)128 * r * i], r, N, 1, t, flags,\n\t\t\t &V[(size_t)2 * r * i * N],\n\t\t\t NROM, VROM,\n\t\t\t &XY[(size_t)4 * r * i], NULL, NULL);\n#else\n\t\t\tsmix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,\n\t\t\t NROM, VROM, XY, NULL, NULL);\n\n\t\t} #pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/yescrypt/userom.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(n, shared, thread_data, setting, seed, count, save, nsave, key)", "context_chars": 100, "text": ");\n\t\t\t}\n\t\t}\n\n\t\tstart = times(&start_tms);\n\n\t\tn = count * omp_get_max_threads();\n\t\tcount = 0;\n\t\tdo {\nfor (i = 0; i < n; i++) {\n\t\t\t\tunsigned int j = count + i;\n\t\t\t\tchar p[32];\n\t\t\t\tuint8_t 
hash[128];\n\t\t\t\tsnprintf(p, sizeof(p), \"%u\", seed + j);\n\t\t\t\tthread_data_s *td = &thread_data[omp_get_thread_num()].s;\n\t\t\t\tuint64_t start1 = time_us();\n#if 1\n\t\t\t\tconst char *h = (const char *)yescrypt_r(\n\t\t\t\t shared, &td->local,\n\t\t\t\t (const uint8_t *)p, strlen(p),\n\t\t\t\t setting, &key, hash, sizeof(hash));\n#else\n\t\t\t\tyescrypt_local_t local;\n\t\t\t\tyescrypt_init_local(&local);\n\t\t\t\tconst char *h = (const char *)yescrypt_r(\n\t\t\t\t shared, &local,\n\t\t\t\t (const uint8_t *)p, strlen(p),\n\t\t\t\t setting, &key, hash, sizeof(hash));\n\t\t\t\tyescrypt_free_local(&local);\n\n\t\t\t\tuint64_t end1 = time_us();\n\t\t\t\tif (end1 < start1)\n\t\t\t\t\tend1 = start1;\n\t\t\t\tuint64_t diff1 = end1 - start1;\n\t\t\t\ttd->total += diff1;\n\t\t\t\tif (diff1 < td->min)\n\t\t\t\t\ttd->min = diff1;\n\t\t\t\tif (diff1 > td->max)\n\t\t\t\t\ttd->max = diff1;\n\t\t\t\tif (j < nsave && strcmp(save[j], h)) {\n#pragma omp critical\n\t\t\t\t\tprintf(\"Mismatch at %u, %s != %s\\n\",\n\t\t\t\t\t j, save[j], h);\n\t\t\t\t}\n\t\t\t} #pragma omp parallel for default(none) private(i) shared(n, shared, thread_data, setting, seed, count, save, nsave, key)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue)", "context_chars": 100, "text": ";\n\t\tseed_shr_30 = seed >> 30;\n#endif\n\t}\n\n#ifdef _OPENMP\n#if defined(__SSE4_1__) || defined(__MIC__)\n#elif defined(__SSE2__)\n#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)\n#else\n#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)\n\n\n\tfor (base = start; base < end; base++) {\n\t\tuint32_t seed = (uint32_t)base << P;\n#if defined(__SSE2__) || defined(__MIC__)\n\t\ttypedef struct {\n\t\t\tvtype a, b, c, d, e, f, g, h;\n\t\t} atype;\n\t\tatype xM, x = {}, x710 = {};\n\t\t/* Hint to compiler not to waste registers */\n\t\tvolatile atype x1;\n\t\tconst vtype cone = _mm_set1_epi32(1);\n\t\tvtype vseed = _mm_set1_epi32(seed);\n\t\tversion_t version;\n\n#define DO(which, add) \\\n\txM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add));\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\txM.a = _mm512_add_epi32(vseed, _mm512_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30));\n\t\tDO(b, 1) DO(c, 32) DO(d, 33)\n\t\tDO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97)\n#elif defined(__AVX2__)\n\t\txM.a = _mm256_add_epi32(vseed, _mm256_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14));\n\t\tDO(b, 1) DO(c, 16) DO(d, 17)\n\t\tDO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49)\n#else\n\t\txM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6));\n\t\tDO(b, 1) DO(c, 8) DO(d, 9)\n\t\tDO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25)\n\n#undef DO\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d) \\\n\tDO(x.e, x1.e, xM.e) \\\n\tDO(x.f, x1.f, xM.f) \\\n\tDO(x.g, x1.g, xM.g) \\\n\tDO(x.h, x1.h, xM.h)\n\n\t\tif (flavor == PHP_LEGACY) {\n\t\t\tconst vtype c69069 = _mm_set1_epi32(69069);\n\t\t\tconst vtype c69069to396 = _mm_set1_epi32(0x4396a0b1);\n\n#define DO(x, x1, xM) \\\n\txM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \\\n\tx1 = xM = _mm_mullo_epi32(c69069, xM); \\\n\txM = _mm_mullo_epi32(c69069to396, 
xM);\n\t\t\tDO_ALL\n#undef DO\n\t\t} else {\n\t\t\tconst vtype cmul = _mm_set1_epi32(1812433253U);\n\t\t\tvtype vi = _mm_add_epi32(cone, cone);\n\t\t\tunsigned int n = (M - 1) / 22;\n\n#define DO(x, x1, xM) \\\n\tx1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone);\n\t\t\tDO_ALL\n#undef DO\n\n\t\t\tdo {\n#define DO(x, x1, xM) \\\n\txM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi);\n#define DO_ALLI \\\n\tDO_ALL \\\n\tvi = _mm_add_epi32(vi, cone);\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI\n#undef DO_ALLI\n#undef DO\n\t\t\t} while (--n);\n\t\t}\n\n\t\tversion = flavor;\n\n\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\tconst vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff);\n\t\t\tconst vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \\\n\t _mm_and_si128(x1, c0x7fffffff)), 1));\n\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \\\n\t _mm_and_si128(x1, cone)));\n\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\tDO(x710.c, x.c, x1.c)\n\t\t\tDO(x710.d, x.d, x1.d)\n\t\t\tDO(x710.e, x.e, x1.e)\n\t\t\tDO(x710.f, x.f, x1.f)\n\t\t\tDO(x710.g, x.g, x1.g)\n\t\t\tDO(x710.h, x.h, x1.h)\n#undef DO\n\n\t\t\tif (version == PHP_521) {\n#define DO(x) \\\n\tx = _mm_xor_si128(x, c0x9908b0df);\n\t\t\t\tDO(x.b)\n\t\t\t\tDO(x.d)\n\t\t\t\tDO(x.f)\n\t\t\t\tDO(x.h)\n#undef DO\n\t\t\t} else\n\t\t\t\tx = x710;\n\t\t}\n\n\t\tdo {\n\t\t\tuint32_t maybe = 1;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\t\tconst vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680);\n\t\t\t\tconst vtype c0xefc60000 = _mm_set1_epi32(0xefc60000);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 11));\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO_SC(x, s, c) \\\n\tx = _mm_xor_si128(x, _mm_and_si128(_mm_slli_epi32(x, s), c));\n#define DO(x, x1, xM) \\\n\tDO_SC(x, 7, c0x9d2c5680) \\\n\tDO_SC(x, 15, c0xefc60000)\n\t\t\t\tDO_ALL\n#undef DO\n#undef DO_SC\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 18));\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx = _mm_srli_epi32(x, 1);\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n#if defined(__SSE4_1__) || defined(__MIC__)\n\t\t\tif (match->flags & MATCH_PURE) {\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tmaybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.b, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.c, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.d, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.e, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.f, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.g, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.h, vvalue);\n#else\n\t\t\t\tvtype amask = _mm_cmpeq_epi32(x.a, vvalue);\n\t\t\t\tvtype bmask = _mm_cmpeq_epi32(x.b, vvalue);\n\t\t\t\tvtype cmask = _mm_cmpeq_epi32(x.c, vvalue);\n\t\t\t\tvtype dmask = _mm_cmpeq_epi32(x.d, vvalue);\n\t\t\t\tvtype emask = _mm_cmpeq_epi32(x.e, vvalue);\n\t\t\t\tvtype fmask = _mm_cmpeq_epi32(x.f, vvalue);\n\t\t\t\tvtype gmask = _mm_cmpeq_epi32(x.g, vvalue);\n\t\t\t\tvtype hmask = _mm_cmpeq_epi32(x.h, vvalue);\n\t\t\t\tmaybe = !(_mm_testz_si128(amask, amask) &&\n\t\t\t\t _mm_testz_si128(bmask, bmask) &&\n\t\t\t\t 
_mm_testz_si128(cmask, cmask) &&\n\t\t\t\t _mm_testz_si128(dmask, dmask) &&\n\t\t\t\t _mm_testz_si128(emask, emask) &&\n\t\t\t\t _mm_testz_si128(fmask, fmask) &&\n\t\t\t\t _mm_testz_si128(gmask, gmask) &&\n\t\t\t\t _mm_testz_si128(hmask, hmask));\n\n\t\t\t}\n\n\n\t\t\tif (maybe) {\n\t\t\t\tunsigned int i;\n\t\t\t\tuint32_t iseed;\n\t\t\t\ttypedef union {\n\t\t\t\t\tatype v;\n\t\t\t\t\tuint32_t s[8][sizeof(vtype) / 4];\n\t\t\t\t} utype;\n\t\t\t\tutype u;\n\t\t\t\t/* Hint to compiler not to waste registers */\n\t\t\t\tvolatile utype uM;\n\t\t\t\tu.v = x;\n\t\t\t\tuM.v = xM;\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 32) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 30; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 31; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#elif defined(__AVX2__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 16) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 14; j < 8; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 15; j < 8; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#else\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 8) {\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 6)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 4)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 2)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed)\n\t\t\t\t\ti++;\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 7)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 5)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 3)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed + 1)\n\t\t\t\t}\n\n\t\t\t\t/* Hint to compiler not to spill xM above */\n\t\t\t\txM = uM.v;\n\t\t\t}\n\n\t\t\tif (version != PHP_521)\n\t\t\t\tbreak;\n\t\t\tversion = PHP_710;\n\t\t\tx = x710;\n\t\t} while (1);\n#else\n\t\ttypedef struct {\n\t\t\tuint32_t a, b, c, d;\n\t\t} atype;\n\t\tatype x = {}, x710 = {};\n\t\tdo {\n\t\t\tatype x1, xM;\n\t\t\tversion_t version;\n\t\t\tunsigned int i;\n\n\t\t\txM.a = seed;\n\t\t\txM.b = seed + 1;\n\t\t\txM.c = seed + 2;\n\t\t\txM.d = seed + 3;\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d)\n\n\t\t\tif (flavor == PHP_LEGACY) {\n#define DO(x, x1, xM) \\\n\txM += xM + 1; \\\n\tx1 = xM *= 69069; \\\n\txM *= 0x4396a0b1;\n\t\t\t\tDO_ALL\n#undef DO\n\t\t\t} else {\n#define DO(x, x1, xM) \\\n\tx1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1;\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tfor (i = 2; i <= M; i++) {\n#define DO(x, x1, xM) \\\n\tNEXT_STATE(xM, i)\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tversion = flavor;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define DO(x, x1, xM) \\\n\tx = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM;\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = xin ^ ((x1 & 1) * 0x9908b0df);\n\t\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\t\tDO(x710.c, x.c, x1.c)\n\t\t\t\tDO(x710.d, x.d, x1.d)\n#undef DO\n\n\t\t\t\tif (version == PHP_521) {\n\t\t\t\t\tx.b ^= 0x9908b0df;\n\t\t\t\t\tx.d ^= 0x9908b0df;\n\t\t\t\t} else\n\t\t\t\t\tx = x710;\n\t\t\t}\n\n\t\t\tdo {\n\t\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define 
DO(x, x1, xM) \\\n\tx ^= x >> 11; \\\n\tx ^= (x << 7) & 0x9d2c5680; \\\n\tx ^= (x << 15) & 0xefc60000; \\\n\tx ^= x >> 18;\n\t\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx >>= 1;\n\t\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tCOMPARE(x.a, x1.a, xM.a, seed)\n\t\t\t\tCOMPARE(x.b, x1.b, xM.b, seed + 1)\n\t\t\t\tCOMPARE(x.c, x1.c, xM.c, seed + 2)\n\t\t\t\tCOMPARE(x.d, x1.d, xM.d, seed + 3)\n\n\t\t\t\tif (version != PHP_521)\n\t\t\t\t\tbreak;\n\t\t\t\tversion = PHP_710;\n\t\t\t\tx = x710;\n\t\t\t} while (1);\n\n\t\t\tseed += 4;\n\t\t} while (seed & ((1 << P) - 1));\n\n\t} #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)", "context_chars": 100, "text": "match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue)\n#elif defined(__SSE2__)\n#else\n#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)\n\n\n\tfor (base = start; base < end; base++) {\n\t\tuint32_t seed = (uint32_t)base << P;\n#if defined(__SSE2__) || defined(__MIC__)\n\t\ttypedef struct {\n\t\t\tvtype a, b, c, d, e, f, g, h;\n\t\t} atype;\n\t\tatype xM, x = {}, x710 = {};\n\t\t/* Hint to compiler not to waste registers */\n\t\tvolatile atype x1;\n\t\tconst vtype cone = _mm_set1_epi32(1);\n\t\tvtype vseed = _mm_set1_epi32(seed);\n\t\tversion_t version;\n\n#define DO(which, add) \\\n\txM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add));\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\txM.a = _mm512_add_epi32(vseed, _mm512_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30));\n\t\tDO(b, 1) DO(c, 32) DO(d, 33)\n\t\tDO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97)\n#elif defined(__AVX2__)\n\t\txM.a = _mm256_add_epi32(vseed, _mm256_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14));\n\t\tDO(b, 1) DO(c, 16) DO(d, 17)\n\t\tDO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49)\n#else\n\t\txM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6));\n\t\tDO(b, 1) DO(c, 8) DO(d, 9)\n\t\tDO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25)\n\n#undef DO\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d) \\\n\tDO(x.e, x1.e, xM.e) \\\n\tDO(x.f, x1.f, xM.f) \\\n\tDO(x.g, x1.g, xM.g) \\\n\tDO(x.h, x1.h, xM.h)\n\n\t\tif (flavor == PHP_LEGACY) {\n\t\t\tconst vtype c69069 = _mm_set1_epi32(69069);\n\t\t\tconst vtype c69069to396 = _mm_set1_epi32(0x4396a0b1);\n\n#define DO(x, x1, xM) \\\n\txM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \\\n\tx1 = xM = _mm_mullo_epi32(c69069, xM); \\\n\txM = _mm_mullo_epi32(c69069to396, xM);\n\t\t\tDO_ALL\n#undef DO\n\t\t} else {\n\t\t\tconst vtype cmul = _mm_set1_epi32(1812433253U);\n\t\t\tvtype vi = _mm_add_epi32(cone, cone);\n\t\t\tunsigned int n = (M - 1) / 22;\n\n#define DO(x, x1, xM) \\\n\tx1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone);\n\t\t\tDO_ALL\n#undef DO\n\n\t\t\tdo {\n#define DO(x, x1, xM) \\\n\txM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi);\n#define DO_ALLI \\\n\tDO_ALL \\\n\tvi = _mm_add_epi32(vi, cone);\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI 
DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI\n#undef DO_ALLI\n#undef DO\n\t\t\t} while (--n);\n\t\t}\n\n\t\tversion = flavor;\n\n\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\tconst vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff);\n\t\t\tconst vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \\\n\t _mm_and_si128(x1, c0x7fffffff)), 1));\n\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \\\n\t _mm_and_si128(x1, cone)));\n\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\tDO(x710.c, x.c, x1.c)\n\t\t\tDO(x710.d, x.d, x1.d)\n\t\t\tDO(x710.e, x.e, x1.e)\n\t\t\tDO(x710.f, x.f, x1.f)\n\t\t\tDO(x710.g, x.g, x1.g)\n\t\t\tDO(x710.h, x.h, x1.h)\n#undef DO\n\n\t\t\tif (version == PHP_521) {\n#define DO(x) \\\n\tx = _mm_xor_si128(x, c0x9908b0df);\n\t\t\t\tDO(x.b)\n\t\t\t\tDO(x.d)\n\t\t\t\tDO(x.f)\n\t\t\t\tDO(x.h)\n#undef DO\n\t\t\t} else\n\t\t\t\tx = x710;\n\t\t}\n\n\t\tdo {\n\t\t\tuint32_t maybe = 1;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\t\tconst vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680);\n\t\t\t\tconst vtype c0xefc60000 = _mm_set1_epi32(0xefc60000);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 11));\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO_SC(x, s, c) \\\n\tx = _mm_xor_si128(x, _mm_and_si128(_mm_slli_epi32(x, s), c));\n#define DO(x, x1, xM) \\\n\tDO_SC(x, 7, c0x9d2c5680) \\\n\tDO_SC(x, 15, c0xefc60000)\n\t\t\t\tDO_ALL\n#undef DO\n#undef DO_SC\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 18));\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx = _mm_srli_epi32(x, 1);\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n#if defined(__SSE4_1__) || defined(__MIC__)\n\t\t\tif (match->flags & MATCH_PURE) {\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tmaybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.b, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.c, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.d, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.e, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.f, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.g, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.h, vvalue);\n#else\n\t\t\t\tvtype amask = _mm_cmpeq_epi32(x.a, vvalue);\n\t\t\t\tvtype bmask = _mm_cmpeq_epi32(x.b, vvalue);\n\t\t\t\tvtype cmask = _mm_cmpeq_epi32(x.c, vvalue);\n\t\t\t\tvtype dmask = _mm_cmpeq_epi32(x.d, vvalue);\n\t\t\t\tvtype emask = _mm_cmpeq_epi32(x.e, vvalue);\n\t\t\t\tvtype fmask = _mm_cmpeq_epi32(x.f, vvalue);\n\t\t\t\tvtype gmask = _mm_cmpeq_epi32(x.g, vvalue);\n\t\t\t\tvtype hmask = _mm_cmpeq_epi32(x.h, vvalue);\n\t\t\t\tmaybe = !(_mm_testz_si128(amask, amask) &&\n\t\t\t\t _mm_testz_si128(bmask, bmask) &&\n\t\t\t\t _mm_testz_si128(cmask, cmask) &&\n\t\t\t\t _mm_testz_si128(dmask, dmask) &&\n\t\t\t\t _mm_testz_si128(emask, emask) &&\n\t\t\t\t _mm_testz_si128(fmask, fmask) &&\n\t\t\t\t _mm_testz_si128(gmask, gmask) &&\n\t\t\t\t _mm_testz_si128(hmask, hmask));\n\n\t\t\t}\n\n\n\t\t\tif (maybe) {\n\t\t\t\tunsigned int i;\n\t\t\t\tuint32_t iseed;\n\t\t\t\ttypedef union {\n\t\t\t\t\tatype v;\n\t\t\t\t\tuint32_t s[8][sizeof(vtype) / 4];\n\t\t\t\t} utype;\n\t\t\t\tutype u;\n\t\t\t\t/* Hint to compiler not to waste registers */\n\t\t\t\tvolatile utype uM;\n\t\t\t\tu.v = x;\n\t\t\t\tuM.v = xM;\n#if 
defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 32) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 30; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 31; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#elif defined(__AVX2__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 16) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 14; j < 8; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 15; j < 8; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#else\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 8) {\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 6)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 4)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 2)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed)\n\t\t\t\t\ti++;\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 7)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 5)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 3)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed + 1)\n\t\t\t\t}\n\n\t\t\t\t/* Hint to compiler not to spill xM above */\n\t\t\t\txM = uM.v;\n\t\t\t}\n\n\t\t\tif (version != PHP_521)\n\t\t\t\tbreak;\n\t\t\tversion = PHP_710;\n\t\t\tx = x710;\n\t\t} while (1);\n#else\n\t\ttypedef struct {\n\t\t\tuint32_t a, b, c, d;\n\t\t} atype;\n\t\tatype x = {}, x710 = {};\n\t\tdo {\n\t\t\tatype x1, xM;\n\t\t\tversion_t version;\n\t\t\tunsigned int i;\n\n\t\t\txM.a = seed;\n\t\t\txM.b = seed + 1;\n\t\t\txM.c = seed + 2;\n\t\t\txM.d = seed + 3;\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d)\n\n\t\t\tif (flavor == PHP_LEGACY) {\n#define DO(x, x1, xM) \\\n\txM += xM + 1; \\\n\tx1 = xM *= 69069; \\\n\txM *= 0x4396a0b1;\n\t\t\t\tDO_ALL\n#undef DO\n\t\t\t} else {\n#define DO(x, x1, xM) \\\n\tx1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1;\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tfor (i = 2; i <= M; i++) {\n#define DO(x, x1, xM) \\\n\tNEXT_STATE(xM, i)\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tversion = flavor;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define DO(x, x1, xM) \\\n\tx = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM;\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = xin ^ ((x1 & 1) * 0x9908b0df);\n\t\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\t\tDO(x710.c, x.c, x1.c)\n\t\t\t\tDO(x710.d, x.d, x1.d)\n#undef DO\n\n\t\t\t\tif (version == PHP_521) {\n\t\t\t\t\tx.b ^= 0x9908b0df;\n\t\t\t\t\tx.d ^= 0x9908b0df;\n\t\t\t\t} else\n\t\t\t\t\tx = x710;\n\t\t\t}\n\n\t\t\tdo {\n\t\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define DO(x, x1, xM) \\\n\tx ^= x >> 11; \\\n\tx ^= (x << 7) & 0x9d2c5680; \\\n\tx ^= (x << 15) & 0xefc60000; \\\n\tx ^= x >> 18;\n\t\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx >>= 1;\n\t\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tCOMPARE(x.a, x1.a, xM.a, seed)\n\t\t\t\tCOMPARE(x.b, x1.b, xM.b, seed + 1)\n\t\t\t\tCOMPARE(x.c, x1.c, xM.c, seed + 2)\n\t\t\t\tCOMPARE(x.d, x1.d, xM.d, seed + 3)\n\n\t\t\t\tif (version != PHP_521)\n\t\t\t\t\tbreak;\n\t\t\t\tversion = PHP_710;\n\t\t\t\tx = x710;\n\t\t\t} while 
(1);\n\n\t\t\tseed += 4;\n\t\t} while (seed & ((1 << P) - 1));\n\n\t} #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)", "context_chars": 100, "text": "one) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)\n#else\nfor (base = start; base < end; base++) {\n\t\tuint32_t seed = (uint32_t)base << P;\n#if defined(__SSE2__) || defined(__MIC__)\n\t\ttypedef struct {\n\t\t\tvtype a, b, c, d, e, f, g, h;\n\t\t} atype;\n\t\tatype xM, x = {}, x710 = {};\n\t\t/* Hint to compiler not to waste registers */\n\t\tvolatile atype x1;\n\t\tconst vtype cone = _mm_set1_epi32(1);\n\t\tvtype vseed = _mm_set1_epi32(seed);\n\t\tversion_t version;\n\n#define DO(which, add) \\\n\txM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add));\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\txM.a = _mm512_add_epi32(vseed, _mm512_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30));\n\t\tDO(b, 1) DO(c, 32) DO(d, 33)\n\t\tDO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97)\n#elif defined(__AVX2__)\n\t\txM.a = _mm256_add_epi32(vseed, _mm256_set_epi32(\n\t\t 0, 2, 4, 6, 8, 10, 12, 14));\n\t\tDO(b, 1) DO(c, 16) DO(d, 17)\n\t\tDO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49)\n#else\n\t\txM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6));\n\t\tDO(b, 1) DO(c, 8) DO(d, 9)\n\t\tDO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25)\n\n#undef DO\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d) \\\n\tDO(x.e, x1.e, xM.e) \\\n\tDO(x.f, x1.f, xM.f) \\\n\tDO(x.g, x1.g, xM.g) \\\n\tDO(x.h, x1.h, xM.h)\n\n\t\tif (flavor == PHP_LEGACY) {\n\t\t\tconst vtype c69069 = _mm_set1_epi32(69069);\n\t\t\tconst vtype c69069to396 = _mm_set1_epi32(0x4396a0b1);\n\n#define DO(x, x1, xM) \\\n\txM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \\\n\tx1 = xM = _mm_mullo_epi32(c69069, xM); \\\n\txM = _mm_mullo_epi32(c69069to396, xM);\n\t\t\tDO_ALL\n#undef DO\n\t\t} else {\n\t\t\tconst vtype cmul = _mm_set1_epi32(1812433253U);\n\t\t\tvtype vi = _mm_add_epi32(cone, cone);\n\t\t\tunsigned int n = (M - 1) / 22;\n\n#define DO(x, x1, xM) \\\n\tx1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone);\n\t\t\tDO_ALL\n#undef DO\n\n\t\t\tdo {\n#define DO(x, x1, xM) \\\n\txM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi);\n#define DO_ALLI \\\n\tDO_ALL \\\n\tvi = _mm_add_epi32(vi, cone);\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI\n\t\t\t\tDO_ALLI DO_ALLI DO_ALLI DO_ALLI\n#undef DO_ALLI\n#undef DO\n\t\t\t} while (--n);\n\t\t}\n\n\t\tversion = flavor;\n\n\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\tconst vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff);\n\t\t\tconst vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \\\n\t _mm_and_si128(x1, c0x7fffffff)), 1));\n\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \\\n\t _mm_and_si128(x1, cone)));\n\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\tDO(x710.c, x.c, 
x1.c)\n\t\t\tDO(x710.d, x.d, x1.d)\n\t\t\tDO(x710.e, x.e, x1.e)\n\t\t\tDO(x710.f, x.f, x1.f)\n\t\t\tDO(x710.g, x.g, x1.g)\n\t\t\tDO(x710.h, x.h, x1.h)\n#undef DO\n\n\t\t\tif (version == PHP_521) {\n#define DO(x) \\\n\tx = _mm_xor_si128(x, c0x9908b0df);\n\t\t\t\tDO(x.b)\n\t\t\t\tDO(x.d)\n\t\t\t\tDO(x.f)\n\t\t\t\tDO(x.h)\n#undef DO\n\t\t\t} else\n\t\t\t\tx = x710;\n\t\t}\n\n\t\tdo {\n\t\t\tuint32_t maybe = 1;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n\t\t\t\tconst vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680);\n\t\t\t\tconst vtype c0xefc60000 = _mm_set1_epi32(0xefc60000);\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 11));\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO_SC(x, s, c) \\\n\tx = _mm_xor_si128(x, _mm_and_si128(_mm_slli_epi32(x, s), c));\n#define DO(x, x1, xM) \\\n\tDO_SC(x, 7, c0x9d2c5680) \\\n\tDO_SC(x, 15, c0xefc60000)\n\t\t\t\tDO_ALL\n#undef DO\n#undef DO_SC\n\n#define DO(x, x1, xM) \\\n\tx = _mm_xor_si128(x, _mm_srli_epi32(x, 18));\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx = _mm_srli_epi32(x, 1);\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n#if defined(__SSE4_1__) || defined(__MIC__)\n\t\t\tif (match->flags & MATCH_PURE) {\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tmaybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.b, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.c, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.d, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.e, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.f, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.g, vvalue) |\n\t\t\t\t _mm512_cmpeq_epi32_mask(x.h, vvalue);\n#else\n\t\t\t\tvtype amask = _mm_cmpeq_epi32(x.a, vvalue);\n\t\t\t\tvtype bmask = _mm_cmpeq_epi32(x.b, vvalue);\n\t\t\t\tvtype cmask = _mm_cmpeq_epi32(x.c, vvalue);\n\t\t\t\tvtype dmask = _mm_cmpeq_epi32(x.d, vvalue);\n\t\t\t\tvtype emask = _mm_cmpeq_epi32(x.e, vvalue);\n\t\t\t\tvtype fmask = _mm_cmpeq_epi32(x.f, vvalue);\n\t\t\t\tvtype gmask = _mm_cmpeq_epi32(x.g, vvalue);\n\t\t\t\tvtype hmask = _mm_cmpeq_epi32(x.h, vvalue);\n\t\t\t\tmaybe = !(_mm_testz_si128(amask, amask) &&\n\t\t\t\t _mm_testz_si128(bmask, bmask) &&\n\t\t\t\t _mm_testz_si128(cmask, cmask) &&\n\t\t\t\t _mm_testz_si128(dmask, dmask) &&\n\t\t\t\t _mm_testz_si128(emask, emask) &&\n\t\t\t\t _mm_testz_si128(fmask, fmask) &&\n\t\t\t\t _mm_testz_si128(gmask, gmask) &&\n\t\t\t\t _mm_testz_si128(hmask, hmask));\n\n\t\t\t}\n\n\n\t\t\tif (maybe) {\n\t\t\t\tunsigned int i;\n\t\t\t\tuint32_t iseed;\n\t\t\t\ttypedef union {\n\t\t\t\t\tatype v;\n\t\t\t\t\tuint32_t s[8][sizeof(vtype) / 4];\n\t\t\t\t} utype;\n\t\t\t\tutype u;\n\t\t\t\t/* Hint to compiler not to waste registers */\n\t\t\t\tvolatile utype uM;\n\t\t\t\tu.v = x;\n\t\t\t\tuM.v = xM;\n#if defined(__MIC__) || defined(__AVX512F__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 32) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 30; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 31; j < 16; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#elif defined(__AVX2__)\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 16) {\n\t\t\t\t\tunsigned int j, k;\n\t\t\t\t\tfor (j = 0, k = 14; j < 8; j++, k -= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t\ti++;\n\t\t\t\t\tfor (j = 0, k = 15; j < 8; j++, k 
-= 2) {\n\t\t\t\t\t\tCOMPARE(u.s[i][j], uM.s[i][j],\n\t\t\t\t\t\t iseed + k)\n\t\t\t\t\t}\n\t\t\t\t}\n#else\n\t\t\t\tfor (i = 0, iseed = seed; i < 8; i++, iseed += 8) {\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 6)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 4)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 2)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed)\n\t\t\t\t\ti++;\n\t\t\t\t\tCOMPARE(u.s[i][0], uM.s[i][0], iseed + 7)\n\t\t\t\t\tCOMPARE(u.s[i][1], uM.s[i][1], iseed + 5)\n\t\t\t\t\tCOMPARE(u.s[i][2], uM.s[i][2], iseed + 3)\n\t\t\t\t\tCOMPARE(u.s[i][3], uM.s[i][3], iseed + 1)\n\t\t\t\t}\n\n\t\t\t\t/* Hint to compiler not to spill xM above */\n\t\t\t\txM = uM.v;\n\t\t\t}\n\n\t\t\tif (version != PHP_521)\n\t\t\t\tbreak;\n\t\t\tversion = PHP_710;\n\t\t\tx = x710;\n\t\t} while (1);\n#else\n\t\ttypedef struct {\n\t\t\tuint32_t a, b, c, d;\n\t\t} atype;\n\t\tatype x = {}, x710 = {};\n\t\tdo {\n\t\t\tatype x1, xM;\n\t\t\tversion_t version;\n\t\t\tunsigned int i;\n\n\t\t\txM.a = seed;\n\t\t\txM.b = seed + 1;\n\t\t\txM.c = seed + 2;\n\t\t\txM.d = seed + 3;\n\n#define DO_ALL \\\n\tDO(x.a, x1.a, xM.a) \\\n\tDO(x.b, x1.b, xM.b) \\\n\tDO(x.c, x1.c, xM.c) \\\n\tDO(x.d, x1.d, xM.d)\n\n\t\t\tif (flavor == PHP_LEGACY) {\n#define DO(x, x1, xM) \\\n\txM += xM + 1; \\\n\tx1 = xM *= 69069; \\\n\txM *= 0x4396a0b1;\n\t\t\t\tDO_ALL\n#undef DO\n\t\t\t} else {\n#define DO(x, x1, xM) \\\n\tx1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1;\n\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\tfor (i = 2; i <= M; i++) {\n#define DO(x, x1, xM) \\\n\tNEXT_STATE(xM, i)\n\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tversion = flavor;\n\n\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define DO(x, x1, xM) \\\n\tx = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM;\n\t\t\t\tDO_ALL\n#undef DO\n\n#define DO(xout, xin, x1) \\\n\txout = xin ^ ((x1 & 1) * 0x9908b0df);\n\t\t\t\tDO(x710.a, x.a, x1.a)\n\t\t\t\tDO(x710.b, x.b, x1.b)\n\t\t\t\tDO(x710.c, x.c, x1.c)\n\t\t\t\tDO(x710.d, x.d, x1.d)\n#undef DO\n\n\t\t\t\tif (version == PHP_521) {\n\t\t\t\t\tx.b ^= 0x9908b0df;\n\t\t\t\t\tx.d ^= 0x9908b0df;\n\t\t\t\t} else\n\t\t\t\t\tx = x710;\n\t\t\t}\n\n\t\t\tdo {\n\t\t\t\tif (!(match->flags & MATCH_SKIP)) {\n#define DO(x, x1, xM) \\\n\tx ^= x >> 11; \\\n\tx ^= (x << 7) & 0x9d2c5680; \\\n\tx ^= (x << 15) & 0xefc60000; \\\n\tx ^= x >> 18;\n\t\t\t\t\tDO_ALL\n#undef DO\n\n\t\t\t\t\tif (match->flags & MATCH_FULL) {\n#define DO(x, x1, xM) \\\n\tx >>= 1;\n\t\t\t\t\t\tDO_ALL\n#undef DO\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tCOMPARE(x.a, x1.a, xM.a, seed)\n\t\t\t\tCOMPARE(x.b, x1.b, xM.b, seed + 1)\n\t\t\t\tCOMPARE(x.c, x1.c, xM.c, seed + 2)\n\t\t\t\tCOMPARE(x.d, x1.d, xM.d, seed + 3)\n\n\t\t\t\tif (version != PHP_521)\n\t\t\t\t\tbreak;\n\t\t\t\tversion = PHP_710;\n\t\t\t\tx = x710;\n\t\t\t} while (1);\n\n\t\t\tseed += 4;\n\t\t} while (seed & ((1 << P) - 1));\n\n\t} #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/kmer-cnt/parallel.h", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "obId(0);\n\tProgressPercent progress(scheduledTasks.size());\n\tif (progressBar) progress.advance(0);\n\n\tfor (size_t i = 0; i < std::min(maxThreads, scheduledTasks.size()); ++i)\n\t{\n\t\tbool finished = false;\n\t\twhile (!finished)\n\t\t{\n\t\t\tsize_t expected = 0;\n\t\t\twhile(true)\n\t\t\t{\n\t\t\t\texpected = jobId;\n\t\t\t\tif (jobId 
== scheduledTasks.size()) \n\t\t\t\t{\n\t\t\t\t\tfinished = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tif (jobId.compare_exchange_weak(expected, expected + 1))\n\t\t\t\t{\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!finished) {\n\t\t\t\tupdateFun(scheduledTasks[expected]);\n\t\t\t\tif (progressBar) progress.advance();\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/fmi/bwa-mem2/x86_64/src/FMI_search.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ries(int64_t *posArray, int64_t *coordArray, uint32_t count, int32_t nthreads)\n{\n uint32_t i;\n// for(i = 0; i < count; i++)\n {\n int64_t pos = posArray[i];\n int64_t sa_entry = sa_ms_byte[pos];\n sa_entry = sa_entry << 32;\n sa_entry = sa_entry + sa_ls_word[pos];\n //_mm_prefetch((const char *)(sa_ms_byte + pos + SAL_PFD), _MM_HINT_T0);\n coordArray[i] = sa_entry;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/fmi/bwa-mem2/sve/src/FMI_search.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ries(int64_t *posArray, int64_t *coordArray, uint32_t count, int32_t nthreads)\n{\n uint32_t i;\n// for(i = 0; i < count; i++)\n {\n int64_t pos = posArray[i];\n int64_t sa_entry = sa_ms_byte[pos];\n sa_entry = sa_entry << 32;\n sa_entry = sa_entry + sa_ls_word[pos];\n //_mm_prefetch((const char *)(sa_ms_byte + pos + SAL_PFD), _MM_HINT_T0);\n coordArray[i] = sa_entry;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h", "omp_pragma_line": "#pragma omp parallel for num_threads(threads) reduction(+: distance)", "context_chars": 100, "text": "ce = 0;\n int i = 0;\n#pragma warning(disable: 6993)\n /* 你Code Analysis无法分析那就别分析啊,警告无法分析是什么鬼 */\nfor (i = 0; i < dimensions; i++) {\n distance += pow(datasets[sampleId * dimensions + i]\n - centers[clusterId * dimensions + i], 2\n );\n } #pragma omp parallel for num_threads(threads) reduction(+: distance)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h", "omp_pragma_line": "#pragma omp parallel for num_threads(threads)", "context_chars": 100, "text": "新成本*/\n costs[0] = costs[1];\n costs[1] = 0;\n\n /* 聚类预测 */\n int j = 0;\n for (j = 0; j < dataSize; j++) {\n // 累计成本\n costs[1] += getCost(j);\n // 类簇计数器自增\n #pragma warning(disable: 6011)\n sampleCounts[labels[j]] += 1;\n // 累计类簇中样本值(用于计算聚类中心)\n for (int k = 0; k < dimensions; k++) {\n nextMeans[labels[j] * dimensions + k] +=\n datasets[j * dimensions + k];\n }\n } #pragma omp parallel for num_threads(threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h", "omp_pragma_line": "#pragma omp parallel for num_threads(threads)", "context_chars": 100, "text": "uble));\n memset(overallMeans, 0, dimensions * sizeof(double));\n\n /* 并行遍历数据集 */\n int i = 0;\nfor (i = 0; i < dataSize; i++) {\n // 将当前样本分配至相应的聚类集合\n counts[labels[i]] += 1;\n\n for (int j = 0; j < dimensions; j++) {\n // 当前特征的偏离\n const double axes = datasets[i * dimensions + j]\n - centers[labels[i] * dimensions + j];\n // 累计当前聚类中心各特征的距离\n variances[labels[i] * dimensions + j] += pow(axes, 
2);\n // 累计总体中心\n overallMeans[j] += datasets[i * dimensions + j];\n // 累计所有样本各特征的方差\n minVariances[j] += pow(datasets[i * dimensions + j], 2);\n }\n } #pragma omp parallel for num_threads(threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h", "omp_pragma_line": "#pragma omp parallel for num_threads(threads)", "context_chars": 100, "text": "存器 */\n costs[0] = costs[1];\n costs[1] = 0;\n\n /* 模型调优 */\n int j = 0;\n for (j = 0; j < dataSize; j++) {\n // 样本概率\n probabilities[j] = 0;\n // 遍历分布\n for (int k = 0; k < clusters; k++) {\n // 样本属于此分布的概率\n double probability = 1;\n /* 计算单高斯分布概率密度 */\n for (int m = 0; m < dimensions; m++) {\n probability *= 1 / sqrt(2 * PI * variances[k * dimensions + m]);\n const double square = pow(\n datasets[j * dimensions + m] - means[k * dimensions + m], 2\n );\n probability *= exp(-0.5 * square / variances[k * dimensions + m]);\n }\n probabilities[j] += priorities[k] * probability;\n\n /* 它与上面probability有什么区别,我也不知道... */\n // 样本属于当前高斯分布的概率\n const double sampleProbability =\n probability * priorities[k] / probabilities[j];\n // 累计权重\n nextPriorities[k] += sampleProbability;\n\n // 遍历维度\n for (int m = 0; m < dimensions; m++) {\n // 累计均值\n nextMeans[k * dimensions + m] +=\n sampleProbability * datasets[j * dimensions + m];\n // 累计方差\n nextVariances[k * dimensions + m] +=\n sampleProbability * pow(datasets[j * dimensions + m], 2);\n }\n }\n\n /* 1e-20已经小于double参与计算的最小值了。别问,问就是魔法值 */\n // 累计样本引入的成本\n costs[1] += max(log10(probabilities[j]), -20);\n } #pragma omp parallel for num_threads(threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/ordered/test_ordered_doacross_omp_cur_iteration.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": "int ordered_doacross(){\n\n int a[N];\n int b[N];\n int c[N];\n a[0] = 0;\n b[0] = 0;\n c[0] = 0;\n\n for(int i = 1; i < N; i++){\n a[i] = i;\n #pragma omp ordered doacross(sink: i-1)\n b[i] = a[i-1];\n #pragma omp ordered doacross(source:omp_cur_iteration)\n c[i] = a[i] + b[i];\n } #pragma omp parallel for ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/ordered/test_ordered_doacross.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": "int ordered_doacross(){\n\n int a[N];\n int b[N];\n int c[N];\n a[0] = 0;\n b[0] = 0;\n c[0] = 0;\n\n for(int i = 1; i < N; i++){\n a[i] = i;\n #pragma omp ordered doacross(sink: i-1)\n b[i] = a[i-1];\n #pragma omp ordered doacross(source:)\n c[i] = a[i] + b[i];\n } #pragma omp parallel for ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/runtime_calls/test_omp_in_explicit_task.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "en called inside an explicit task\");\n \n for(int i = 0; i < N; i++){\n A[i] = 1;\n for(int i = 0; i < N; i++){\n A[i] = omp_in_explicit_task();\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/application_kernels/gemv_target_many_matrices.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "out.push_back(allocate(N));\n }\n\n // Doing the computation\n {\n Timer local(\"GEMV\");\n 
for(int i=0; i < NUM_CALC; i++) {\n gemv(N, 1.0f, manyA[i], manyV[i], manyVout[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/application_kernels/gemv_target_reduction.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": ":Vout[:n])\n for(int row=0; rowfor(int col=0; col #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", c[N];\n int errors = 0, i;\n\n#pragma omp task shared(a) private(i)\n#pragma omp target map(from: a)\nfor (i = 0; i < N; i++)\n a[i] = i;\n \n#pragma omp task shared(b) private(i)\n#pragma omp target map(from: b)\n#pragma omp parallel for\n for (i = 0; i < N; i++)\n b[i] = 10;\n\n#pragma omp taskwait\n\n#pragma omp task shared(c) private(i)\n#pragma omp target map(from: c) map(to:a,b)\n#pragma omp parallel for\n for (i = 0; i < N; i++)\n c[i] = a[i] + b[i];\n\n#pragma omp taskwait\n\n for (i = 0; i < N; i++) {\n OMPVV_TEST_AND_SET(errors, (c[i] != i + 10));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i < N; i++)\n a[i] = i;\n \n#pragma omp task shared(b) private(i)\n#pragma omp target map(from: b)\nfor (i = 0; i < N; i++)\n b[i] = 10;\n\n#pragma omp taskwait\n\n#pragma omp task shared(c) private(i)\n#pragma omp target map(from: c) map(to:a,b)\n#pragma omp parallel for\n for (i = 0; i < N; i++)\n c[i] = a[i] + b[i];\n\n#pragma omp taskwait\n\n for (i = 0; i < N; i++) {\n OMPVV_TEST_AND_SET(errors, (c[i] != i + 10));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "gma omp taskwait\n\n#pragma omp task shared(c) private(i)\n#pragma omp target map(from: c) map(to:a,b)\nfor (i = 0; i < N; i++)\n c[i] = a[i] + b[i];\n\n#pragma omp taskwait\n\n for (i = 0; i < N; i++) {\n OMPVV_TEST_AND_SET(errors, (c[i] != i + 10));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/target_teams_distribute_parallel_for/test_target_teams_distribute_parallel_for_if_parallel_modifier.c", "omp_pragma_line": "#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)", "context_chars": 100, "text": "READS_DEVICE)\n for (i = 0; i < N; i++) {\n init_num_threads_dev[i] = omp_get_num_threads();\n }\n\nfor (i = 0; i < N; i++) {\n init_num_threads_host[i] = omp_get_num_threads();\n } #pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/target_teams_distribute_parallel_for/test_target_teams_distribute_parallel_for_if_no_modifier.c", "omp_pragma_line": "#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)", "context_chars": 100, "text": "READS_DEVICE)\n for (i = 0; i < N; i++) {\n init_num_threads_dev[i] = omp_get_num_threads();\n }\n\nfor (i 
= 0; i < N; i++) {\n init_num_threads_host[i] = omp_get_num_threads();\n } #pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/requires/test_requires_dynamic_allocators.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "= omp_init_allocator(x_memspace, 1, x_traits);\n\n x = (int *) omp_alloc(N*sizeof(int), x_alloc);\n\nfor (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/atomic/test_atomic_hint.c", "omp_pragma_line": "#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) default(shared)", "context_chars": 100, "text": " int errors = 0, num_threads = -1;\n int a[N];\n\n for (int i = 0; i < N; i++) {\n a[i] = 1;\n }\n\nfor (int i = 0; i < N; i++) {\n if (i == 0) {\n num_threads = omp_get_num_threads();\n#pragma omp atomic hint(omp_sync_hint_speculative)\n a[1] += 1;\n }\n#pragma omp atomic hint(omp_sync_hint_speculative)\n a[i] += i;\n } #pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/taskwait/test_taskwait_depend.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".h\"\n\n#define N 1024\nint errors = 0;\nint test_wrapper() { //wrapper for taskwait depend function\n for (int i=1; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/declare_target/test_declare_target_parallel_for.c", "omp_pragma_line": "#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE) shared(num_threads)", "context_chars": 100, "text": "gma omp declare target\nint parallel_for_fun(int a[N], int b[N], int c[N]) {\n int num_threads = -1;\nfor (int i = 0; i < N; i++) {\n a[i] = b[i]*c[i];\n if (omp_get_thread_num() == 0) {\n num_threads = omp_get_num_threads();\n }\n } #pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE) shared(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/declare_target/test_declare_target_nested.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "lare target\n\nint test_target() { //function in declare target statement \n\n//change values on device\nfor (i = 0; i < N; i++) {\n a[i] = 5;\n b[i] = 10;\n c[i] = 15;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/task/test_parallel_for_reduction_task_device.c", "omp_pragma_line": "#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_DEVICE) shared(y, z, num_threads)", "context_chars": 100, "text": "y[i] = i + 1;\n z[i] = 2*(i + 1);\n }\n\n#pragma omp target map(tofrom: sum, y, z, num_threads)\n {\nfor (int i = 0; i < N; i++) {\n#pragma omp task in_reduction(+: sum)\n sum += y[i]*z[i];\n if (omp_get_thread_num() == 0) {\n num_threads = omp_get_num_threads();\n }\n } #pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_DEVICE) shared(y, z, num_threads)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/task/test_parallel_for_reduction_task.c", "omp_pragma_line": "#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_HOST) shared(y, z, num_threads)", "context_chars": 100, "text": "int expected_sum = 0;\n\n for (int i = 0; i < N; i++) {\n y[i] = i + 1;\n z[i] = 2*(i + 1);\n }\n\nfor (int i = 0; i < N; i++) {\n#pragma omp task in_reduction(+: sum)\n sum += y[i]*z[i];\n if (omp_get_thread_num() == 0) {\n num_threads = omp_get_num_threads();\n }\n } #pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_HOST) shared(y, z, num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for_simd/test_parallel_for_simd_atomic.c", "omp_pragma_line": "#pragma omp parallel for simd shared(x) num_threads(OMPVV_NUM_THREADS_HOST)", "context_chars": 100, "text": "lel_for_simd_atomic() {\n OMPVV_INFOMSG(\"test_parallel_for_simd_atomic\");\n int errors = 0, x = 0;\n\nfor (int i = 0; i < N; i++) {\n#pragma omp atomic update\n x += 1;\n } #pragma omp parallel for simd shared(x) num_threads(OMPVV_NUM_THREADS_HOST)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/target/test_target_imperfect_loop.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "0; j < M; j++){\n data2[i][j] = 0;\n }\n }\n\n\n#pragma omp target map(tofrom: data1, data2)\n {\nfor( int i = 0; i < N; i++){\n data1[i] += i;\n for(int j = 0; j < M; j++){\n data2[i][j] += i + j;\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ".h\"\n\n#define N 1024\n\nint test_allocate() {\n int errors = 0;\n int x[N];\n\n#pragma omp allocate(x) \n\nfor (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate_allocator.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x, y: 64)", "context_chars": 100, "text": "ERROR_IF(((intptr_t) x) % 64 != 0,\n \"Condition (intptr_t) x) %% 64 != 0 failed\")\n\nfor (int i = 0; i < N; i++) {\n x[i] = i;\n y[i] = 3*i;\n } #pragma omp parallel for simd simdlen(16) aligned(x, y: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate_on_device.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "llocator(omp_default_mem_alloc)\n x = (int *) omp_alloc(N*sizeof(int), omp_default_mem_alloc);\n\n for (int i = 0; i < N; i++) {\n x[i] = 2*i;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/scan/test_scan.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)", "context_chars": 100, "text": "ed_x = 0;\n int a[N];\n int b[N];\n\n for (int i = 0; i < N; i++) {\n a[i] = i;\n b[i] = 0;\n }\n\nfor (int i = 0; i < N; i++) {\n x += a[i];\n#pragma omp scan inclusive(x)\n 
b[i] = x;\n } #pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/scan/test_scan.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)", "context_chars": 100, "text": "ed_x = 0;\n int a[N];\n int b[N];\n\n for (int i = 0; i < N; i++) {\n a[i] = i;\n b[i] = 0;\n }\n\nfor (int i = 0; i < N; i++) {\n b[i] = x;\n#pragma omp scan exclusive(x)\n x += a[i];\n } #pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_notequals.c", "omp_pragma_line": "#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)", "context_chars": 100, "text": "t z[N];\n\n for (int i = 0; i < N; i++) {\n x[i] = 1;\n y[i] = i + 1;\n z[i] = 2*(i + 1);\n }\n\nfor (int i = 0; i != N; i++) {\n x[i] += y[i]*z[i];\n } #pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_order_concurrent.c", "omp_pragma_line": "#pragma omp parallel for order(concurrent) num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)", "context_chars": 100, "text": "t z[N];\n\n for (int i = 0; i < N; i++) {\n x[i] = 1;\n y[i] = i + 1;\n z[i] = 2*(i + 1);\n }\n\nfor (int i = 0; i < N; i++) {\n x[i] += y[i]*z[i];\n } #pragma omp parallel for order(concurrent) num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_allocate.c", "omp_pragma_line": "#pragma omp parallel for allocate(x_alloc: x) private(x) shared(result) num_threads(OMPVV_NUM_THREADS_HOST)", "context_chars": 100, "text": "for (int i = 0; i < N; i++) {\n for (int j = 0; j < N; j++) {\n result[i][j] = -1;\n }\n }\n\nfor (int i = 0; i < N; i++) {\n x = (int *) malloc(N*sizeof(int));\n if (x != NULL) {\n#pragma omp simd simdlen(16) aligned(x: 64)\n for (int j = 0; j < N; j++) {\n x[j] = j*i;\n }\n for (int j = 0; j < N; j++) {\n result[i][j] = x[j];\n }\n free(x);\n successful_alloc++;\n }\n } #pragma omp parallel for allocate(x_alloc: x) private(x) shared(result) num_threads(OMPVV_NUM_THREADS_HOST)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/atomic/test_atomic_compare.c", "omp_pragma_line": "#pragma omp parallel for shared(pmax) ", "context_chars": 100, "text": "s max through non-parallel methods\n if(arr[i] > smax){\n smax = arr[i];\n }\n }\n for(int i = 0; i pmax){\n pmax = arr[i];\n }\n } #pragma omp parallel for shared(pmax) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t *arr);\n\n#pragma omp declare variant(add_two) match(construct={dispatch}) \nvoid add(int *arr){\n for (int i = 0; i < N; i++){ // Base function adds 1 to array values\n arr[i] = arr[i]+1;\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch_is_device_ptr.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e device pointer\");\n return 1;\n }\n #pragma omp target is_device_ptr(arr)\n {\n for(int i = 0; i < N; i++){\n arr[i] = i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch_device.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt *arr);\n\n#pragma omp declare variant(add_dev) match(construct={dispatch}) \nvoid add(int *arr){\n for (int i = 0; i < N; i++){ // Base function adds 1 to array values\n arr[i] = arr[i]+1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/order/test_parallel_for_order_unconstrained.c", "omp_pragma_line": "#pragma omp parallel for order(unconstrained:concurrent)", "context_chars": 100, "text": "= 0; i < N; i++) {\n\t\tx[i] = i;\n\t}\n\n\tOMPVV_TEST_OFFLOADING;\n\n\t#pragma omp target map(tofrom: x)\n\t{\n\t\tfor (int i = 0; i < N; i++) {\n\t\t\tx[i] = x[i] + 2;\t\n\t\t} #pragma omp parallel for order(unconstrained:concurrent)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "null\"); \n return (1); \n }\n\n int not_init_to_zero = 0;\n int not_correct_updated_values = 0;\n\n for (int i = 0; i < N; i++) {\n if (x[i] != 0) {\n #pragma omp atomic write\n not_init_to_zero = 1;\n } \n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "+) {\n if (x[i] != 0) {\n #pragma omp atomic write\n not_init_to_zero = 1;\n } \n }\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 1;\n } \n }\n\n #pragma omp parallel for\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n \n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n #pragma omp atomic write\n not_correct_updated_values = 1;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_aligned_calloc.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "ndition ((intptr_t)(x))%%64 != 0 failed. 
The memory does not seem to be properly aligned.\");\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_aligned_calloc.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "imd simdlen(16) aligned(x: 64)\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n\n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n #pragma omp atomic write\n not_correct_array_values = 1; \n }\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_target_aligned_alloc_device.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": " OMPVV_ERROR_IF(((intptr_t)(x))%64 != 0, \" Condition ((intptr_t)(x))%%64 != 0 failed \");\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_target_aligned_alloc_device.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "imd simdlen(16) aligned(x: 64)\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n\n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n #pragma omp atomic write\n not_correct_array_values = 1; \n }\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++; \n } else {\n int not_init_to_zero = 0;\n int not_correct_updated_values = 0;\n\n for (int i = 0; i < N; i++) {\n if (x[i] != 0) {\n #pragma omp atomic write\n not_init_to_zero = 1;\n } \n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0) {\n #pragma omp atomic write\n not_init_to_zero = 1;\n } \n }\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " #pragma omp parallel for\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n\n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n #pragma omp atomic write\n not_correct_updated_values = 1;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_aligned_alloc_host.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "_SET_VERBOSE(errors, ((intptr_t)(x))%64 != 0);\n\n int values_did_not_match_expected_changes = 0;\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_aligned_alloc_host.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "p parallel for simd simdlen(16) aligned(x: 64)\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n\n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n values_did_not_match_expected_changes = 1;\n }\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_allocate_allocator_align.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": " omp allocate(x) allocator(omp_default_mem_alloc) align(64)\n\n\n#pragma omp target map(from:x[:N])\n{\t\nfor (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_aligned_calloc.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "Condition ((intptr_t)(x))%%64 != 0 failed. The memory does not seem to be properly aligned.\");\n\n for (int i = 0; i < N; i++) {\n x[i] = i;\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_aligned_calloc.c", "omp_pragma_line": "#pragma omp parallel for simd simdlen(16) aligned(x: 64)", "context_chars": 100, "text": "el for simd simdlen(16) aligned(x: 64)\n for (int i = 0; i < N; i++) {\n x[i] = i;\n }\n\n for (int i = 0; i < N; i++) {\n if (x[i] != i) {\n #pragma omp atomic write\n not_correct_array_values = 1; \n }\n } #pragma omp parallel for simd simdlen(16) aligned(x: 64)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_static.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " DATA_TYPE POLYBENCH_2D(A, N, N, n, n))\n{\n int i, j, k;\n\n for (k = 0; k < _PB_N; k++)\n {\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n #pragma omp parallel for schedule(static)\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. 
*/\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_static.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "lel for schedule(static)\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_dynamic.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": " DATA_TYPE POLYBENCH_2D(A, N, N, n, n))\n{\n int i, j, k;\n\n for (k = 0; k < _PB_N; k++)\n {\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n #pragma omp parallel for schedule(dynamic)\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_dynamic.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": "el for schedule(dynamic)\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. 
*/\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_wrong_for.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "kernel_lu(int n,\n DATA_TYPE POLYBENCH_2D(A, N, N, n, n))\n{\n int i, j, k;\n \n for (k = 0; k < _PB_N; k++)\n {\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_gpu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " #pragma omp target data map(tofrom:A)\n { \t\n for (k = 0; k < _PB_N; k++)\n {\n\t \n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k]; \n \n #pragma omp parallel for\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n \n }\n }\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n //my_print_array(n, POLYBENCH_ARRAY(A));\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n printf(\"\\n\");\n //my_print_array(n, POLYBENCH_ARRAY(A));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_gpu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lel for\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k]; \n \n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n \n }\n }\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n //my_print_array(n, POLYBENCH_ARRAY(A));\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n printf(\"\\n\");\n //my_print_array(n, POLYBENCH_ARRAY(A));\n\n /* Be clean. 
*/\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_guided.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": " DATA_TYPE POLYBENCH_2D(A, N, N, n, n))\n{\n int i, j, k;\n\n for (k = 0; k < _PB_N; k++)\n {\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n #pragma omp parallel for schedule(guided)\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_guided.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": "lel for schedule(guided)\n for (j = k + 1; j < _PB_N; j++)\n A[k][j] = A[k][j] / A[k][k];\n for (i = k + 1; i < _PB_N; i++)\n for (j = k + 1; j < _PB_N; j++)\n A[i][j] = A[i][j] - A[i][k] * A[k][j];\n }\n}\n\nint main(int argc, char **argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);\n\n /* Initialize array(s). */\n init_array(n, POLYBENCH_ARRAY(A));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_lu(n, POLYBENCH_ARRAY(A));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));\n\n /* Be clean. 
*/\n POLYBENCH_FREE_ARRAY(A);\n\n return 0;\n} #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] *= beta; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] = 0.0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "um_vectors; i++) { y_data[i] = 0.0; }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] *= temp; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] *= beta; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] = 0.0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "um_vectors; i++) { y_data[i] = 0.0; }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] *= temp; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef 
HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ectorData(v);\n\n if (v->num_active_vectors == v->num_vectors)\n {\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < v->num_vectors * size; j++) { vector_data[j] = value; } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "e_indices[i] * size;\n end_offset = start_offset + size;\n\n#ifdef HYPRE_USING_OPENMP\n for (j = start_offset; j < end_offset; j++) { vector_data[j] = value; } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ind[i] * size;\n dest = y_data + y_active_ind[i] * size;\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < size; j++) { dest[j] += alpha * src[j]; } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ve_ind[i] * size;\n current_alpha = alpha[ al_active_ind[i] ];\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < size; j++)\n {\n dest[j] = current_alpha * src[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_data + x_active_ind[i] * size;\n current_product = 0.0;\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < size; k++)\n {\n current_product += x_ptr[k] * hypre_conj(y_ptr[k]);\n } #pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "tr = y_data + y_active_ind[i] * size;\n current_product = 0.0;\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < size; k++)\n {\n current_product += x_ptr[k] * hypre_conj(y_ptr[k]);\n } #pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r = x_data + x_active_ind[0] * size;\n current_coef = *rVal++;\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < size; k++)\n {\n y_ptr[k] = current_coef * x_ptr[k];\n } #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "data + x_active_ind[i] * size;\n current_coef = *rVal++;\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < size; k++)\n {\n y_ptr[k] += current_coef * x_ptr[k];\n } #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c", "omp_pragma_line": "#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "data + x_active_ind[i] * size;\n current_coef = *rVal++;\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < size; k++)\n {\n y_ptr[k] += current_coef * x_ptr[k];\n } #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n\n if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < local_num_rows; i++)\n {\n hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];\n hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "re_IJMatrixPrintLevel(matrix);\n\n hypre_MPI_Comm_rank(comm, &my_id);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nrows; i++)\n {\n row_index = rows[i];\n if (row_index >= row_partitioning[0] &&\n row_index < row_partitioning[1])\n {\n /* compute local row number */\n index = (HYPRE_Int)(row_index - row_partitioning[0]);\n ncols[i] = diag_i[index + 1] - diag_i[index] + offd_i[index + 1] - offd_i[index];\n }\n else\n {\n ncols[i] = 0;\n if (print_level)\n {\n hypre_printf (\"Warning! Row %b is not on Proc. 
%d!\\n\",\n row_index, my_id);\n }\n }\n } #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd = hypre_CSRMatrixNumNonzeros(offd);\n HYPRE_Int ii;\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < nnz_diag; ii++)\n {\n diag_data[ii] = value;\n } #pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r (ii = 0; ii < nnz_diag; ii++)\n {\n diag_data[ii] = value;\n }\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < nnz_offd; ii++)\n {\n offd_data[ii] = value;\n } #pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,j0,temp)", "context_chars": 100, "text": "SRMatrixJ(diag);\n diag_data = hypre_CSRMatrixData(diag);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n j0 = diag_i[i];\n for (j = j0; j < diag_i[i + 1]; j++)\n {\n if (diag_j[j] == i)\n {\n temp = diag_data[j0];\n diag_data[j0] = diag_data[j];\n diag_data[j] = temp;\n diag_j[j] = diag_j[j0];\n diag_j[j0] = i;\n break;\n }\n }\n } #pragma omp parallel for private (i,j,j0,temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "+)\n {\n col_map_offd[i] = tmp_j[i];\n }\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nnz_offd; i++)\n {\n offd_j[i] = hypre_BigBinarySearch(col_map_offd, big_offd_j[i], num_cols_offd);\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " read all row indices\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n rows[i] = (HYPRE_BigInt) i32buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " read all row indices\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n rows[i] = (HYPRE_BigInt) i64buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ad all column indices\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n cols[i] = (HYPRE_BigInt) i32buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": 
"#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ad all column indices\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n cols[i] = (HYPRE_BigInt) i64buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "l matrix coefficients\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n vals[i] = (HYPRE_Complex) f32buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "l matrix coefficients\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n vals[i] = (HYPRE_Complex) f64buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/HYPRE_IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "pute preliminary partial sums (in parallel) within each interval */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < nvals; j += bsize)\n {\n HYPRE_Int i, n = hypre_min((j + bsize), nvals);\n\n sums[j] = 0;\n for (i = j + 1; i < n; i++)\n {\n sums[i] = sums[i - 1] + vals[i - 1];\n }\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/HYPRE_IJMatrix.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "mpute final partial sums (in parallel) for the remaining entries */\n#ifdef HYPRE_USING_OPENMP\n for (j = bsize; j < nvals; j += bsize)\n {\n HYPRE_Int i, n = hypre_min((j + bsize), nvals);\n\n for (i = j + 1; i < n; i++)\n {\n sums[i] += sums[j];\n }\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " num_values = (HYPRE_Int)(vec_stop - vec_start) + 1;\n }\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_values; j++)\n {\n data[vecoffset + j * idxstride] = values[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " num_values = (HYPRE_Int)(vec_stop - vec_start) + 1;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_values; j++)\n {\n data[vecoffset + j * idxstride] += values[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/aux_parcsr_matrix.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:local_num_rownnz) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ownnz;\n\n /* Count number of nonzero 
rows */\n local_num_rownnz = 0;\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < local_num_rows; i++)\n {\n if (row_space[i] > 0)\n {\n local_num_rownnz++;\n }\n } #pragma omp parallel for private(i) reduction(+:local_num_rownnz) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJVector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "l vector coefficients\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < total_size; i++)\n {\n buffer[i] = (HYPRE_Complex) f32buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJVector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "l vector coefficients\");\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < total_size; i++)\n {\n buffer[i] = (HYPRE_Complex) f64buffer[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_cg.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ectorCopy(z, p);\n }\n else\n {\n beta = rho / rhom1;\n\n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < localNRows; i++ ) \n pData[i] = beta * pData[i] + zData[i];\n\n }\n hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, ap);\n sigma = hypre_ParVectorInnerProd(p, ap);\n alpha = rho /sigma;\n if ( PSmat_ == NULL )\n hypre_ParVectorAxpy(alpha, p, u); /* u = u + alpha p */\n else\n for ( i = 0; i < localNRows; i++ ) u2Data[i] += alpha * pData[i];\n\n hypre_ParVectorAxpy(-alpha, ap, r); /* r = r - alpha ap */\n if (tolerance_ != 0.0 && maxIterations_ > 1) \n rnorm = sqrt(hypre_ParVectorInnerProd(r, r));\n }\n\n /*-----------------------------------------------------------------\n * for domain decomposition, recover the solution vector\n *-----------------------------------------------------------------*/\n\n if ( PSmat_ != NULL )\n {\n uData = hypre_VectorData(hypre_ParVectorLocalVector(u));\n for ( i = 0; i < shortNRows; i++ ) uData[i] = u2Data[i];\n delete [] u2Data;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_amgsa_coarsen1.cxx", "omp_pragma_line": "#pragma omp parallel for private(irow,j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if ( threshold_ > 0.0 )\n {\n diagData = new double[AdiagNRows];\n\n#ifdef HYPRE_USING_OPENMP\nfor (irow = 0; irow < AdiagNRows; irow++)\n {\n for (j = AdiagRPtr[irow]; j < AdiagRPtr[irow+1]; j++)\n {\n if ( AdiagCols[j] == irow )\n {\n diagData[irow] = AdiagVals[j];\n break;\n }\n }\n } #pragma omp parallel for private(irow,j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_amgsa_coarsen1.cxx", "omp_pragma_line": "#pragma omp parallel for private(irow,j,jj,index,dcomp1,dcomp2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n epsilon = epsilon * epsilon;\n rowLengths = new int[AdiagNRows];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( irow = 0; irow < AdiagNRows; irow++ )\n {\n rowLengths[irow] = 0;\n index = startRow + irow;\n if ( localLabels != NULL ) labeli = localLabels[irow];\n else labeli 
= 0;\n if ( epsilon > 0.0 )\n {\n for (j = AdiagRPtr[irow]; j < AdiagRPtr[irow+1]; j++)\n {\n jj = AdiagCols[j];\n if ( localLabels != NULL ) labelj = localLabels[jj];\n else labelj = 0;\n if ( jj != irow )\n {\n dcomp1 = AdiagVals[j] * AdiagVals[j];\n if (dcomp1 > 0.0 && labeli == labelj) rowLengths[irow]++;\n }\n }\n }\n else\n {\n for (j = AdiagRPtr[irow]; j < AdiagRPtr[irow+1]; j++)\n {\n jj = AdiagCols[j];\n if ( localLabels != NULL ) labelj = localLabels[jj];\n else labelj = 0;\n if ( jj != irow && AdiagVals[j] != 0.0 && labeli == labelj )\n rowLengths[irow]++;\n }\n }\n } #pragma omp parallel for private(irow,j,jj,index,dcomp1,dcomp2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_gs.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "le);\n commHandle = NULL;\n }\n\n if (nthreads > 1)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) tmpData[i] = uData[i];\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < nthreads; j++)\n {\n size = localNRows/nthreads;\n rest = localNRows - size*nthreads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( ADiagA[ADiagI[i]] != zero)\n {\n res = fData[i];\n for (jj = ADiagI[i]; jj < ADiagI[i+1]; jj++)\n {\n ii = ADiagJ[jj];\n if (ii >= ns && ii < ne)\n res -= ADiagA[jj] * uData[ii];\n else\n res -= ADiagA[jj] * tmpData[ii];\n }\n for (jj = AOffdI[i]; jj < AOffdI[i+1]; jj++)\n {\n ii = AOffdJ[jj];\n res -= AOffdA[jj] * vExtData[ii];\n }\n uData[i] += relaxWeight * (res / ADiagA[ADiagI[i]]);\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_gs.cxx", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n#endif\n for (i = 0; i < localNRows; i++) tmpData[i] = uData[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < nthreads; j++)\n {\n size = localNRows/nthreads;\n rest = localNRows - size*nthreads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( ADiagA[ADiagI[i]] != zero)\n {\n res = fData[i];\n for (jj = ADiagI[i]; jj < ADiagI[i+1]; jj++)\n {\n ii = ADiagJ[jj];\n if (ii >= ns && ii < ne)\n res -= ADiagA[jj] * uData[ii];\n else\n res -= ADiagA[jj] * tmpData[ii];\n }\n for (jj = AOffdI[i]; jj < AOffdI[i+1]; jj++)\n {\n ii = AOffdJ[jj];\n res -= AOffdA[jj] * vExtData[ii];\n }\n uData[i] += relaxWeight * (res / ADiagA[ADiagI[i]]);\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_jacobi.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " rData[i] = dtemp;\n }\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < localNRows; i++ ) \n uData[i] += weight * rData[i] * diagonal_[i];\n\n zeroInitialGuess_ = 0;\n }\n }\n else\n {\n if (numFpts_ != localNRows)\n {\n printf(\"MLI_Solver_Jacobi::solve ERROR : length mismatch.\\n\");\n exit(1);\n }\n f2 = (hypre_ParVector *) auxVec2_->getVector();\n u2 = (hypre_ParVector *) auxVec3_->getVector();\n fData = hypre_VectorData(hypre_ParVectorLocalVector(f));\n f2Data = hypre_VectorData(hypre_ParVectorLocalVector(f2));\n u2Data = hypre_VectorData(hypre_ParVectorLocalVector(u2));\n for (i = 0; i < numFpts_; i++) f2Data[i] = fData[FptList_[i]]; \n for (i = 0; i < numFpts_; i++) u2Data[i] = uData[FptList_[i]]; \n\n for ( is = 0; is < nSweeps_; is++ )\n {\n weight = relaxWeights_[is];\n hypre_ParVectorCopy(f2, r); \n if ( zeroInitialGuess_ == 0 )\n hypre_ParCSRMatrixMatvec(-1.0, A, u2, 1.0, r);\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for ( i = 0; i < localNRows; i++ ) \n u2Data[i] += weight * rData[i] * diagonal_[i];\n\n zeroInitialGuess_ = 0;\n }\n for (i = 0; i < numFpts_; i++) uData[FptList_[i]] = u2Data[i]; \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_jacobi.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "uess_ == 0 )\n hypre_ParCSRMatrixMatvec(-1.0, A, u2, 1.0, r);\n \n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < localNRows; i++ ) \n u2Data[i] += weight * rData[i] * diagonal_[i];\n\n zeroInitialGuess_ = 0;\n }\n for (i = 0; i < numFpts_; i++) uData[FptList_[i]] = u2Data[i]; \n }\n return 0;\n}\n\n/******************************************************************************\n * set Jacobi parameters\n *---------------------------------------------------------------------------*/\n\nint MLI_Solver_Jacobi::setParams( char *paramString, int argc, char **argv )\n{\n int i, *fList;\n double *weights=NULL;\n\n if ( !strcmp(paramString, \"numSweeps\") )\n {\n if ( argc != 1 ) \n {\n printf(\"MLI_Solver_Jacobi::setParams ERROR : needs 1 arg.\\n\");\n return 1;\n }\n nSweeps_ = *(int*) argv[0];\n if ( nSweeps_ < 1 ) nSweeps_ = 1;\n if ( relaxWeights_ != NULL ) delete [] relaxWeights_;\n relaxWeights_ = NULL;\n return 0;\n }\n else if ( !strcmp(paramString, \"setMaxEigen\") )\n {\n if ( argc != 1 ) \n {\n printf(\"MLI_Solver_Jacobi::setParams ERROR : needs 1 arg.\\n\");\n return 1;\n }\n maxEigen_ = *(double*) argv[0];\n return 0;\n }\n else if ( !strcmp(paramString, \"relaxWeight\") )\n {\n if ( argc != 2 && argc != 1 ) \n {\n printf(\"MLI_Solver_Jacobi::setParams ERROR : needs 1 or 2 args.\\n\");\n return 1;\n }\n if ( argc >= 1 ) nSweeps_ = *(int*) argv[0];\n if ( argc == 2 ) weights = (double*) argv[1];\n if ( nSweeps_ < 1 ) nSweeps_ = 1;\n if ( relaxWeights_ != NULL ) delete [] relaxWeights_;\n relaxWeights_ = NULL;\n if ( weights != NULL )\n {\n relaxWeights_ = new double[nSweeps_];\n for ( i = 0; i < nSweeps_; i++ ) relaxWeights_[i] = weights[i];\n }\n }\n else if ( !strcmp(paramString, \"zeroInitialGuess\") )\n {\n zeroInitialGuess_ = 1;\n return 0;\n }\n else if ( !strcmp(paramString, \"setModifiedDiag\") )\n {\n 
modifiedD_ |= 1;\n return 0;\n }\n else if ( !strcmp(paramString, \"useModifiedDiag\") )\n {\n modifiedD_ |= 2;\n return 0;\n }\n else if ( !strcmp(paramString, \"setFptList\") )\n {\n if ( argc != 2 ) \n {\n printf(\"MLI_Solver_Jacobi::setParams ERROR : needs 2 args.\\n\");\n return 1;\n }\n numFpts_ = *(int*) argv[0];\n fList = (int*) argv[1];\n if ( FptList_ != NULL ) delete [] FptList_;\n FptList_ = NULL;\n if (numFpts_ <= 0) return 0;\n FptList_ = new int[numFpts_];;\n for ( i = 0; i < numFpts_; i++ ) FptList_[i] = fList[i];\n return 0;\n }\n else if ( !strcmp(paramString, \"ownAmat\") )\n {\n ownAmat_ = 1;\n return 0;\n }\n#if 0\n else\n { \n printf(\"MLI_Solver_Jacobi::setParams - parameter not recognized.\\n\");\n printf(\" Params = %s\\n\", paramString);\n return 1;\n }\n\n return 0;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n coef = mlsCf_[0] * mlsOver_;\n\n /* u = u + coef * Vtemp */\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) uData[i] += (coef * VtempData[i]);\n\n /* compute residual Vtemp = A u - f */\n\n hypre_ParVectorCopy(f,Vtemp); \n hypre_ParCSRMatrixMatvec(1.0, A, u, -1.0, Vtemp);\n\n /* compute residual Wtemp = (I - omega * A)^deg Vtemp */\n\n hypre_ParVectorCopy(Vtemp,Wtemp); \n for ( deg = 0; deg < mlsDeg_; deg++ ) \n {\n omega = mlsOm_[deg];\n hypre_ParCSRMatrixMatvec(1.0, A, Wtemp, 0.0, Vtemp);\n for (i = 0; i < localNRows; i++) \n WtempData[i] -= (omega * VtempData[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " /* compute u = u - coef * Vtemp */\n\n coef = mlsOver_ * mlsOm2_;\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) uData[i] -= ( coef * VtempData[i] );\n\n }\n else\n {\n /* Ytemp = coef * Vtemp */\n\n coef = mlsCf_[0];\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) YtempData[i] = (coef * VtempData[i]);\n\n /* Wtemp = coef * Vtemp */\n\n for ( deg = 1; deg < mlsDeg_; deg++ ) \n {\n hypre_ParCSRMatrixMatvec(1.0, A, Vtemp, 0.0, Wtemp);\n hypre_ParVectorCopy(Wtemp,Vtemp); \n coef = mlsCf_[deg];\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) \n YtempData[i] += ( coef * WtempData[i] );\n }\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) uData[i] += (mlsOver_ * YtempData[i]);\n\n /* compute residual Vtemp = A u - f */\n\n hypre_ParVectorCopy(f,Vtemp); \n hypre_ParCSRMatrixMatvec(1.0, A, u, -1.0, Vtemp);\n\n /* compute residual Wtemp = (I - omega * A)^deg Vtemp */\n\n hypre_ParVectorCopy(Vtemp,Wtemp); \n for ( deg = 0; deg < mlsDeg_; deg++ ) \n {\n omega = mlsOm_[deg];\n hypre_ParCSRMatrixMatvec(1.0, A, Wtemp, 0.0, Vtemp);\n for (i = 0; i < localNRows; i++) \n WtempData[i] -= (omega * VtempData[i]);\n }\n\n /* compute residual Vtemp = (I - omega * A)^deg Wtemp */\n\n hypre_ParVectorCopy(Wtemp,Vtemp); \n for ( deg = mlsDeg_-1; deg > -1; deg-- ) \n {\n omega = mlsOm_[deg];\n hypre_ParCSRMatrixMatvec(1.0, A, 
Vtemp, 0.0, Wtemp);\n for (i = 0; i < localNRows; i++) \n VtempData[i] -= (omega * WtempData[i]);\n }\n\n /* compute u = u - coef * Vtemp */\n\n coef = mlsOver_ * mlsOm2_;\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) uData[i] -= ( coef * VtempData[i] );\n\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else\n {\n /* Ytemp = coef * Vtemp */\n\n coef = mlsCf_[0];\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) YtempData[i] = (coef * VtempData[i]);\n\n /* Wtemp = coef * Vtemp */\n\n for ( deg = 1; deg < mlsDeg_; deg++ ) \n {\n hypre_ParCSRMatrixMatvec(1.0, A, Vtemp, 0.0, Wtemp);\n hypre_ParVectorCopy(Wtemp,Vtemp); \n coef = mlsCf_[deg];\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) \n YtempData[i] += ( coef * WtempData[i] );\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_ParVectorCopy(Wtemp,Vtemp); \n coef = mlsCf_[deg];\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) \n YtempData[i] += ( coef * WtempData[i] );\n }\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < localNRows; i++) uData[i] += (mlsOver_ * YtempData[i]);\n\n /* compute residual Vtemp = A u - f */\n\n hypre_ParVectorCopy(f,Vtemp); \n hypre_ParCSRMatrixMatvec(1.0, A, u, -1.0, Vtemp);\n\n /* compute residual Wtemp = (I - omega * A)^deg Vtemp */\n\n hypre_ParVectorCopy(Vtemp,Wtemp); \n for ( deg = 0; deg < mlsDeg_; deg++ ) \n {\n omega = mlsOm_[deg];\n hypre_ParCSRMatrixMatvec(1.0, A, Wtemp, 0.0, Vtemp);\n for (i = 0; i < localNRows; i++) \n WtempData[i] -= (omega * VtempData[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Rows; i++) \n YtempData[i] += ( coef * WtempData[i] );\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) uData[i] += (mlsOver_ * YtempData[i]);\n\n /* compute residual Vtemp = A u - f */\n\n hypre_ParVectorCopy(f,Vtemp); \n hypre_ParCSRMatrixMatvec(1.0, A, u, -1.0, Vtemp);\n\n /* compute residual Wtemp = (I - omega * A)^deg Vtemp */\n\n hypre_ParVectorCopy(Vtemp,Wtemp); \n for ( deg = 0; deg < mlsDeg_; deg++ ) \n {\n omega = mlsOm_[deg];\n hypre_ParCSRMatrixMatvec(1.0, A, Wtemp, 0.0, Vtemp);\n for (i = 0; i < localNRows; i++) \n WtempData[i] -= (omega * VtempData[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/FEI_mv/femli/mli_solver_mls.cxx", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " /* compute u = u - coef * Vtemp */\n\n coef = mlsOver_ * mlsOm2_;\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < localNRows; i++) uData[i] -= ( coef * 
VtempData[i] );\n\n }\n return(0); \n}\n\n/******************************************************************************\n * set MLS parameters\n *---------------------------------------------------------------------------*/\n\nint MLI_Solver_MLS::setParams( char *paramString, int argc, char **argv )\n{\n if ( !strcmp(paramString, \"maxEigen\") )\n {\n if ( argc != 1 ) \n {\n printf(\"MLI_Solver_MLS::setParams ERROR : needs 1 or 2 args.\\n\");\n return 1;\n }\n maxEigen_ = *(double*) argv[0];\n if ( maxEigen_ < 0.0 ) \n {\n printf(\"MLI_Solver_MLS::setParams ERROR - maxEigen <= 0 (%e)\\n\", \n maxEigen_);\n maxEigen_ = 0.0;\n return 1;\n }\n }\n else if ( !strcmp(paramString, \"zeroInitialGuess\") )\n {\n zeroInitialGuess_ = 1;\n }\n return 0;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * blk_size; i++) { y_data[i] *= beta; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * blk_size; i++)\n {\n y_data[i] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " y_data[i] = 0.0;\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * blk_size; i++)\n {\n y_data[i] *= temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj,b1,b2,temp) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n {\n for (b1 = 0; b1 < blk_size; b1++)\n {\n temp = y_data[i * blk_size + b1];\n for (b2 = 0; b2 < blk_size; b2++)\n {\n temp += A_data[jj * bnnz + b1 * blk_size + b2] * x_data[A_j[jj] * blk_size + b2];\n }\n y_data[i * blk_size + b1] = temp;\n }\n }\n } #pragma omp parallel for private(i,jj,b1,b2,temp) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * blk_size; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp 
parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * blk_size; i++)\n {\n y_data[i] *= beta;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * blk_size; i++)\n {\n y_data[i] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " y_data[i] = 0.0;\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * blk_size; i++)\n {\n y_data[i] *= temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i, jj,j, b1, b2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++) /*each nonzero in that row*/\n {\n for (b1 = 0; b1 < blk_size; b1++) /*row */\n {\n for (b2 = 0; b2 < blk_size; b2++) /*col*/\n {\n j = A_j[jj]; /*col */\n y_data[j * blk_size + b2] +=\n A_data[jj * bnnz + b1 * blk_size + b2] * x_data[i * blk_size + b1];\n }\n }\n }\n } #pragma omp parallel for private(i, jj,j, b1, b2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_block_mv/csr_block_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * blk_size; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_block_mv/dense_block_matmult.c", "omp_pragma_line": "#pragma omp parallel for private(ib) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "re_DenseBlockMatrixNumNonzerosBlock(C);\n\n HYPRE_Int ib;\n\n#if defined(HYPRE_USING_OPENMP)\n for (ib = 0; ib < num_blocks; ib++)\n {\n HYPRE_Int i, j, k;\n HYPRE_Complex *data_A = hypre_DenseBlockMatrixData(A) + ib * num_nonzeros_block_A;\n HYPRE_Complex *data_B = hypre_DenseBlockMatrixData(B) + ib * num_nonzeros_block_B;\n HYPRE_Complex *data_C = hypre_DenseBlockMatrixData(C) + ib * num_nonzeros_block_C;\n\n for (i = 0; i < num_rows_block_C; i++)\n {\n for (j = 0; j < num_cols_block_C; j++)\n {\n for (k = 0; k < num_rows_block_B; k++)\n {\n /* C[i][j] += A[i][k] * B[k][j]; */\n hypre_DenseBlockMatrixDataIJ(C, data_C, i, j) +=\n hypre_DenseBlockMatrixDataIJ(A, data_A, i, k) *\n hypre_DenseBlockMatrixDataIJ(B, data_B, k, j);\n }\n }\n }\n } #pragma omp parallel for private(ib) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/Euclid/blas_dh.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \\", "context_chars": 100, "text": "_Real alpha, HYPRE_Real *x, HYPRE_Real *y)\n{\n START_FUNC_DH\n HYPRE_Int i;\n\n#ifdef USING_OPENMP_DH\nprivate(i) \n\n for (i=0; i #pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/Euclid/blas_dh.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \\", "context_chars": 100, "text": "_Int n, HYPRE_Real *xIN, HYPRE_Real *yOUT)\n{\n START_FUNC_DH\n HYPRE_Int i;\n\n#ifdef USING_OPENMP_DH\nprivate(i)\n\n for (i=0; i #pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/Euclid/blas_dh.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) firstprivate(alpha, x) \\", "context_chars": 100, "text": "RE_Int n, HYPRE_Real alpha, HYPRE_Real *x)\n{\n START_FUNC_DH\n HYPRE_Int i;\n\n#ifdef USING_OPENMP_DH\nprivate(i)\n\n for (i=0; i #pragma omp parallel for schedule(static) firstprivate(alpha, x) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/Euclid/blas_dh.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) firstprivate(x, y) \\", "context_chars": 100, "text": "\n{\n START_FUNC_DH\n HYPRE_Real result, local_result = 0.0;\n\n HYPRE_Int i;\n\n#ifdef USING_OPENMP_DH\nprivate(i) \\\n reduction(+:local_result)\n\n for (i=0; i #pragma omp parallel for schedule(static) firstprivate(x, y) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/Euclid/blas_dh.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) firstprivate(x) \\", "context_chars": 100, "text": ")\n{\n START_FUNC_DH\n HYPRE_Real result, local_result = 0.0;\n HYPRE_Int i;\n\n#ifdef USING_OPENMP_DH\nprivate(i) \\\n reduction(+:local_result)\n\n for (i=0; i #pragma omp parallel for schedule(static) firstprivate(x) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/distributed_ls/ParaSails/Matrix.c", "omp_pragma_line": "#pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)", "context_chars": 100, "text": "l(mat->num_recv, mat->recv_req, mat->statuses);\n\n /* do the multiply */\n#ifdef HYPRE_USING_OPENMP\nfor (row=0; row<=mat->end_row - mat->beg_row; row++)\n {\n MatrixGetRow(mat, row, &len, &ind, &val);\n\n temp = 0.0;\n for (i=0; irecvbuf[ind[i]];\n }\n y[row] = temp;\n } #pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/struct_mv/boxloop_cuda.h", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ry_location);\n\n if (exec_policy == HYPRE_EXEC_HOST)\n {\n#ifdef HYPRE_USING_OPENMP\n for (HYPRE_Int idx = 0; idx < length; idx++)\n {\n loop_body(idx);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/struct_mv/_hypre_struct_mv.hpp", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": 
"ry_location);\n\n if (exec_policy == HYPRE_EXEC_HOST)\n {\n#ifdef HYPRE_USING_OPENMP\n for (HYPRE_Int idx = 0; idx < length; idx++)\n {\n loop_body(idx);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:ierr) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "* vecstride;\n\n HYPRE_Int i, ierr = 0;\n\n if (indices)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_values; i++)\n {\n HYPRE_BigInt index = indices[i] - base;\n if (index < first_index || index > last_index)\n {\n ierr++;\n }\n else\n {\n HYPRE_Int local_index = (HYPRE_Int) (index - first_index);\n values[i] = data[vecoffset + local_index * idxstride];\n }\n } #pragma omp parallel for private(i) reduction(+:ierr) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_error_in_arg(2);\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_values; i++)\n {\n values[i] = data[vecoffset + i * idxstride];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for private(i, ib, j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "g_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows_diag; i++)\n {\n ib = i / num_rows_block;\n A_diag_i[i] = i * num_cols_block;\n for (j = A_diag_i[i]; j < (i + 1) * num_cols_block; j++)\n {\n A_diag_j[j] = ib * num_cols_block + (j - A_diag_i[i]);\n }\n } #pragma omp parallel for private(i, ib, j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "A_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n A_diag_i[i] = A_diag_j[i] = i;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "i < num_nonzeros; i++)\n {\n A_diag_i[i] = A_diag_j[i] = i;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = num_nonzeros; i < num_rows + 1; i++)\n {\n A_diag_i[i] = num_nonzeros;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ze = num_rows / num_threads;\n rest = num_rows - size * num_threads;\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n HYPRE_Int ns, ne;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n count = diag_i[ns] + offd_i[ns];;\n for (i = ns; i < ne; i++)\n {\n matrix_i[i] = count;\n for (j = diag_i[i]; 
j < diag_i[i + 1]; j++)\n {\n matrix_data[count] = diag_data[j];\n matrix_j[count++] = (HYPRE_BigInt)diag_j[j] + first_col_diag;\n }\n for (j = offd_i[i]; j < offd_i[i + 1]; j++)\n {\n matrix_data[count] = offd_data[j];\n matrix_j[count++] = col_map_offd[offd_j[j]];\n }\n }\n } #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "p_offd_C,\n &col_map_offd_C_inverse);\n\n HYPRE_Int i, j;\n for (i = 0; i < num_cols_offd_A; i++)\n {\n for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++)\n {\n //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);\n B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);\n }\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(ii, i, j)", "context_chars": 100, "text": ";\n }\n map_to_B[i] = cnt;\n cnt++;\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n HYPRE_Int *A_marker = NULL;\n HYPRE_Int ns, ne, A_col, num_cols, nmax;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n nmax = hypre_max(num_rows, num_cols_offd_B);\n A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);\n\n for (i = 0; i < num_rows; i++)\n {\n A_marker[i] = -1;\n }\n\n for (i = ns; i < ne; i++)\n {\n D_tmp[i] = 1.0 / d[i];\n }\n\n num_cols = C_diag_i[ns];\n for (i = ns; i < ne; i++)\n {\n for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)\n {\n A_col = A_diag_j[j];\n if (A_marker[A_col] < C_diag_i[i])\n {\n A_marker[A_col] = num_cols;\n C_diag_j[num_cols] = A_col;\n C_diag_data[num_cols] = A_diag_data[j];\n num_cols++;\n }\n else\n {\n C_diag_data[A_marker[A_col]] += A_diag_data[j];\n }\n }\n for (j = B_diag_i[i]; j < B_diag_i[i + 1]; j++)\n {\n A_col = B_diag_j[j];\n if (A_marker[A_col] < C_diag_i[i])\n {\n A_marker[A_col] = num_cols;\n C_diag_j[num_cols] = A_col;\n C_diag_data[num_cols] = -D_tmp[i] * B_diag_data[j];\n num_cols++;\n }\n else\n {\n C_diag_data[A_marker[A_col]] -= D_tmp[i] * B_diag_data[j];\n }\n }\n }\n\n for (i = 0; i < num_cols_offd_B; i++)\n {\n A_marker[i] = -1;\n }\n\n num_cols = C_offd_i[ns];\n for (i = ns; i < ne; i++)\n {\n for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)\n {\n A_col = map_to_B[A_offd_j[j]];\n if (A_marker[A_col] < B_offd_i[i])\n {\n A_marker[A_col] = num_cols;\n C_offd_j[num_cols] = A_col;\n C_offd_data[num_cols] = A_offd_data[j];\n num_cols++;\n }\n else\n {\n C_offd_data[A_marker[A_col]] += A_offd_data[j];\n }\n }\n for (j = B_offd_i[i]; j < B_offd_i[i + 1]; j++)\n {\n A_col = B_offd_j[j];\n if (A_marker[A_col] < B_offd_i[i])\n {\n A_marker[A_col] = num_cols;\n C_offd_j[num_cols] = A_col;\n C_offd_data[num_cols] = -D_tmp[i] * B_offd_data[j];\n num_cols++;\n }\n else\n {\n C_offd_data[A_marker[A_col]] -= D_tmp[i] * B_offd_data[j];\n }\n }\n }\n hypre_TFree(A_marker, HYPRE_MEMORY_HOST);\n\n } #pragma omp parallel for private(ii, i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for 
HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i <= num_sends; i++)\n {\n send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)", "context_chars": 100, "text": "ts[num_sends] == num_nnz_send);\n\n /* fill the CSR matrix: j and a */\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows_send; i++)\n {\n HYPRE_Int i1 = send_i_offset[i];\n j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);\n /* open row j and fill ja and a to send */\n for (k = A_diag_i[j]; k < A_diag_i[j + 1]; k++)\n {\n send_j[i1] = first_col + A_diag_j[k];\n if (want_data)\n {\n send_a[i1] = A_diag_a[k];\n }\n i1++;\n }\n if (num_procs > 1)\n {\n for (k = A_offd_i[j]; k < A_offd_i[j + 1]; k++)\n {\n send_j[i1] = col_map_offd_A[A_offd_j[k]];\n if (want_data)\n {\n send_a[i1] = A_offd_a[k];\n }\n i1++;\n }\n }\n hypre_assert(send_i_offset[i + 1] == i1);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,rowsum) reduction(max:maxsum) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n maxsum = hypre_max(maxsum, maxsum_local);\n }\n }\n#else\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows_diag_A; i++)\n {\n rowsum = 0.0;\n for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)\n {\n rowsum += hypre_cabs(A_diag_a[j]);\n }\n for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)\n {\n rowsum += hypre_cabs(A_offd_a[j]);\n }\n\n maxsum = hypre_max(maxsum, rowsum);\n } #pragma omp parallel for private(i,j,rowsum) reduction(max:maxsum) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "[num_sends], HYPRE_MEMORY_HOST);\n#endif\n\n /* Pack send data */\n#if defined(HYPRE_USING_OPENMP)\n for (i = send_map_starts[0]; i < send_map_starts[num_sends]; i++)\n {\n send_rdbuf_data[i] = rd_data[send_map_elmts[i]];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " coef;\n\n switch (x_num_vectors)\n {\n case 1:\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < num_rows; i++)\n {\n x_data[i] = y_data[i] / A_data[A_i[i]];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " A_data[A_i[i]];\n }\n break;\n\n case 2:\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < num_rows; i++)\n {\n coef = 1.0 / A_data[A_i[i]];\n\n x_data[i] = y_data[i] * coef;\n x_data[i + x_vecstride] = y_data[i + y_vecstride] * coef;\n } #pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "cstride] * coef;\n }\n break;\n\n case 3:\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < num_rows; i++)\n {\n coef = 1.0 / A_data[A_i[i]];\n\n x_data[i] = y_data[i] * coef;\n x_data[i + x_vecstride] = y_data[i + y_vecstride] * coef;\n x_data[i + 2 * x_vecstride] = y_data[i + 2 * y_vecstride] * coef;\n } #pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "cstride] * coef;\n }\n break;\n\n case 4:\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < num_rows; i++)\n {\n coef = 1.0 / A_data[A_i[i]];\n\n x_data[i] = y_data[i] * coef;\n x_data[i + x_vecstride] = y_data[i + y_vecstride] * coef;\n x_data[i + 2 * x_vecstride] = y_data[i + 2 * y_vecstride] * coef;\n x_data[i + 3 * x_vecstride] = y_data[i + 3 * y_vecstride] * coef;\n } #pragma omp parallel for private(i, coef) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, k, coef) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "stride] * coef;\n }\n break;\n\n default:\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < num_rows; i++)\n {\n coef = 1.0 / A_data[A_i[i]];\n\n for (k = 0; k < x_num_vectors; k++)\n {\n x_data[i + k * x_vecstride] = y_data[i + k * y_vecstride] * coef;\n }\n } #pragma omp parallel for private(i, k, coef) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_mv/par_csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------*/\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);\n i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n i++)\n {\n x_buf_data[i] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_offd; i++)\n {\n col_map_offd_S[i] = col_map_offd_A[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_offd; i++)\n {\n col_map_offd_S[i] = col_map_offd_A[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) 
HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " S same nonzero structure as A */\n hypre_ParCSRMatrixCopy(A, S, 0);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_variables; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n\n /* compute scaling factor and row sum */\n row_scale = 0.0;\n row_sum = hypre_abs(diag);\n if (num_functions > 1)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_max(row_scale, hypre_abs(A_diag_data[jA]));\n row_sum += hypre_abs(A_diag_data[jA]);\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_max(row_scale, hypre_abs(A_offd_data[jA]));\n row_sum += hypre_abs(A_offd_data[jA]);\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n row_scale = hypre_max(row_scale, hypre_abs(A_diag_data[jA]));\n row_sum += hypre_abs(A_diag_data[jA]);\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n row_scale = hypre_max(row_scale, hypre_abs(A_offd_data[jA]));\n row_sum += hypre_abs(A_offd_data[jA]);\n }\n }\n\n /* compute row entries of S */\n S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */\n if ( hypre_abs(row_sum) < hypre_abs(diag) * (2.0 - max_row_sum) && max_row_sum < 1.0 )\n {\n /* make all dependencies weak */\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n S_diag_j[jA] = -1;\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n S_offd_j[jA] = -1;\n }\n }\n else\n {\n if (num_functions > 1)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (hypre_abs(A_diag_data[jA]) <= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (hypre_abs(A_offd_data[jA]) <= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (hypre_abs(A_diag_data[jA]) <= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (hypre_abs(A_offd_data[jA]) <= strength_threshold * row_scale)\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n }\n } #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (index = begin; index < end; index++)\n {\n big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] +\n my_first_cpt;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (index = begin; index < end; index++)\n {\n int_buf_data[index - begin] = CF_marker[send_map_elmts[index]];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": 
"#pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------*/\n S_int_i[0] = 0;\n num_nonzeros = 0;\n#ifdef HYPRE_USING_OPENMP\n for (j = begin; j < end; j++)\n {\n HYPRE_Int jrow = send_map_elmts[j];\n HYPRE_Int index = 0;\n for (k = S_diag_i[jrow]; k < S_diag_i[jrow + 1]; k++)\n {\n if (CF_marker[S_diag_j[k]] > 0) { index++; }\n }\n for (k = S_offd_i[jrow]; k < S_offd_i[jrow + 1]; k++)\n {\n if (CF_marker_offd[S_offd_j[k]] > 0) { index++; }\n }\n S_int_i[j - begin + 1] = index;\n num_nonzeros += S_int_i[j - begin + 1];\n } #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fd_C, &col_map_offd_C,\n &col_map_offd_C_inverse);\n\n for (i = 0 ; i < S_ext_offd_size; i++)\n {\n S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "variables + num_cols_offd,\n HYPRE_MEMORY_HOST);\n\n for (i = 0; i < S_offd_i[num_variables]; i++)\n {\n #pragma omp atomic\n measure_array_temp[num_variables + S_offd_j[i]]++;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "++)\n {\n #pragma omp atomic\n measure_array_temp[num_variables + S_offd_j[i]]++;\n }\n\n for (i = 0; i < num_cols_offd; i++)\n {\n measure_array[i + num_variables] = measure_array_temp[i + num_variables];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "buf_data);\n }\n\n /* calculate the local part for the local nodes */\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < S_diag_i[num_variables]; i++)\n {\n #pragma omp atomic\n measure_array_temp[S_diag_j[i]]++;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "um_variables]; i++)\n {\n #pragma omp atomic\n measure_array_temp[S_diag_j[i]]++;\n }\n\n for (i = 0; i < num_variables; i++)\n {\n measure_array[i] = measure_array_temp[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fd, graph_offd_size, CF_marker, CF_marker_offd);\n */\n\n#ifdef HYPRE_USING_OPENMP\n for (ig = 0; ig < graph_size; ig++)\n {\n i = graph_array[ig];\n if (measure_array[i] > 1)\n {\n CF_marker[i] = 1;\n }\n } #pragma omp parallel for private(ig, 
i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n CF_marker[i] = 1;\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (ig = 0; ig < graph_offd_size; ig++)\n {\n i = graph_array_offd[ig];\n if (measure_array[i + num_variables] > 1)\n {\n CF_marker_offd[i] = 1;\n }\n } #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-------------------------------------------------------*/\n#ifdef HYPRE_USING_OPENMP\n for (ig = 0; ig < graph_size; ig++)\n {\n i = graph_array[ig];\n\n if (measure_array[i] > 1)\n {\n /* for each local neighbor j of i */\n for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++)\n {\n j = S_diag_j[jS];\n if (measure_array[j] > 1)\n {\n if (measure_array[i] > measure_array[j])\n {\n CF_marker[j] = 0;\n }\n else if (measure_array[j] > measure_array[i])\n {\n CF_marker[i] = 0;\n }\n }\n }\n\n /* for each offd neighbor j of i */\n for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++)\n {\n jj = S_offd_j[jS];\n j = num_variables + jj;\n if (measure_array[j] > 1)\n {\n if (measure_array[i] > measure_array[j])\n {\n CF_marker_offd[jj] = 0;\n }\n else if (measure_array[j] > measure_array[i])\n {\n CF_marker[i] = 0;\n }\n }\n }\n } /* for each node with measure > 1 */\n } #pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d F-pts.\n *------------------------------------------------*/\n#ifdef HYPRE_USING_OPENMP\n for (ig = 0; ig < graph_size; ig++)\n {\n i = graph_array[ig];\n\n /*---------------------------------------------\n * If the measure of i is smaller than 1, then\n * make i and F point (because it does not influence\n * any other point)\n *---------------------------------------------*/\n\n if (measure_array[i] < 1)\n {\n CF_marker[i] = F_PT;\n }\n\n /*---------------------------------------------\n * First treat the case where point i is in the\n * independent set: make i a C point,\n *---------------------------------------------*/\n\n if (CF_marker[i] > 0)\n {\n CF_marker[i] = C_PT;\n }\n /*---------------------------------------------\n * Now treat the case where point i is not in the\n * independent set: loop over\n * all the points j that influence equation i; if\n * j is a C point, then make i an F point.\n *---------------------------------------------*/\n else\n {\n /* first the local part */\n for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++)\n {\n /* j is the column number, or the local number of the point influencing i */\n j = S_diag_j[jS];\n if (CF_marker[j] > 0) /* j is a C-point */\n {\n CF_marker[i] = F_PT;\n }\n }\n /* now the external part */\n for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++)\n {\n j = S_offd_j[jS];\n if (CF_marker_offd[j] > 0) /* j is a C-point */\n {\n CF_marker[i] = F_PT;\n }\n }\n } /* end else */\n } #pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n\n n_coarse = 0;\n n_SF = 0;\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++)\n {\n if (CF_marker[i] == 1)\n {\n n_coarse++;\n }\n else if (CF_marker[i] == -3)\n {\n n_SF++;\n }\n } #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "CSRCommHandleDestroy(comm_handle);\n }\n }\n\n n_coarse_offd = 0;\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_offd; i++)\n if (CF_marker_offd[i] == 1) { n_coarse_offd++; } #pragma omp parallel for private(i) reduction(+:n_coarse_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "c(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);\n }\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_offd + 1; i++)\n { Pext_i[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < num_cols_offd + 1; i++)\n { Pext_i[i] = 0; }\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < send_map_start[num_sends]; i++)\n { P_ncols[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "[0] = 0;\n\n for (i = 0; i < num_sends; i++)\n {\n#ifdef HYPRE_USING_OPENMP\n for (j = send_map_start[i]; j < send_map_start[i + 1]; j++)\n {\n j1 = send_map_elmt[j];\n if (assigned[j1] == pass - 1)\n {\n P_ncols[j] = P_diag_i[j1 + 1] + P_offd_i[j1 + 1];\n Pext_send_size += P_ncols[j];\n }\n } #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "i[i + 1] += P_offd_i[i];\n }\n\n /* determine P for coarse points */\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_coarse; i++)\n {\n i1 = C_array[i];\n P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];\n P_diag_data[P_diag_i[i1]] = 1.0;\n } #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ffd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < new_num_cols_offd; i++)\n { 
P_marker_offd[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "oc(HYPRE_BigInt, new_counter[num_passes - 1], HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < new_counter[num_passes - 1]; i++)\n {\n big_permute[i] = -1;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n\n hypre_BigQsort0(col_map_offd_P, 0, num_cols_offd_P - 1);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < new_counter[num_passes - 1]; i++)\n {\n big_k1 = big_permute[i];\n if (big_k1 != -1)\n {\n permute[i] = hypre_BigBinarySearch(col_map_offd_P, big_k1, num_cols_offd_P);\n }\n } #pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "earch(col_map_offd_P, big_k1, num_cols_offd_P);\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n { P_offd_j[i] = permute[P_offd_j[i]]; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "CSRMatrixNumCols(P_offd) = num_cols_offd_P;\n }\n\n if (n_SF)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++)\n if (CF_marker[i] == -3) { CF_marker[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_lr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "er_offd, fine_to_coarse_offd);\n }\n\n hypre_MatvecCommPkgCreate(P);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++)\n {\n if (CF_marker[i] == -3)\n {\n CF_marker[i] = -1;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "omm_handle);\n hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (k = 0; k < num_threads; k++)\n {\n size = num_rows / num_threads;\n rest = num_rows - size * num_threads;\n if (k < rest)\n {\n ns = k * size + k;\n ne = (k + 1) * size + k + 1;\n }\n else\n {\n ns = k * size + rest;\n ne = (k + 1) * size + rest;\n }\n\n if (option == 1)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the l1 norm of the diag part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n 
l1_norm[i] += hypre_abs(A_offd_data[j]);\n }\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the CF l1 norm of the diag part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n if (cf_diag == cf_marker[A_diag_J[j]])\n {\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n {\n l1_norm[i] += hypre_abs(A_offd_data[j]);\n }\n }\n }\n }\n }\n else if (option == 2)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if (ii == i || ii < ns || ii >= ne)\n {\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n }\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n l1_norm[i] += hypre_abs(A_offd_data[j]);\n }\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if ((ii == i || ii < ns || ii >= ne) &&\n (cf_diag == cf_marker[A_diag_J[j]]))\n {\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n }\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n {\n l1_norm[i] += hypre_abs(A_offd_data[j]);\n }\n }\n }\n }\n }\n }\n else if (option == 3)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n l1_norm[i] += A_diag_data[j] * A_diag_data[j];\n }\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n l1_norm[i] += A_offd_data[j] * A_offd_data[j];\n }\n }\n }\n }\n else if (option == 4)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if (ii == i || ii < ns || ii >= ne)\n {\n if (ii == i)\n {\n diag = hypre_abs(A_diag_data[j]);\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n else\n {\n l1_norm[i] += 0.5 * hypre_abs(A_diag_data[j]);\n }\n }\n }\n\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n l1_norm[i] += 0.5 * hypre_abs(A_offd_data[j]);\n }\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if ((ii == i || ii < ns || ii >= ne) &&\n (cf_diag == cf_marker[A_diag_J[j]]))\n {\n if (ii == i)\n {\n diag = hypre_abs(A_diag_data[j]);\n l1_norm[i] += hypre_abs(A_diag_data[j]);\n }\n else\n {\n l1_norm[i] += 0.5 * hypre_abs(A_diag_data[j]);\n }\n }\n }\n\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n {\n l1_norm[i] += 0.5 * hypre_abs(A_offd_data[j]);\n }\n }\n }\n }\n\n /* Truncate according to Remark 6.2 */\n if (l1_norm[i] <= 4.0 / 3.0 * diag)\n {\n l1_norm[i] = diag;\n }\n }\n }\n else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */\n {\n /* Set the 
diag element */\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = A_diag_data[A_diag_I[i]];\n if (l1_norm[i] == 0) { l1_norm[i] = 1.0; }\n }\n }\n else if (option == 6)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n\n if (cf_marker == NULL)\n {\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if (ii == i || ii < ns || ii >= ne)\n {\n if (ii == i)\n {\n diag = hypre_abs(A_diag_data[j]);\n }\n else\n {\n l1_norm[i] += 0.5 * hypre_abs(A_diag_data[j]);\n }\n }\n }\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n l1_norm[i] += 0.5 * hypre_abs(A_offd_data[j]);\n }\n }\n\n l1_norm[i] = (diag + l1_norm[i] + hypre_sqrt(diag * diag + l1_norm[i] * l1_norm[i])) * 0.5;\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)\n {\n ii = A_diag_J[j];\n if ((ii == i || ii < ns || ii >= ne) &&\n (cf_diag == cf_marker[A_diag_J[j]]))\n {\n if (ii == i)\n {\n diag = hypre_abs(A_diag_data[j]);\n }\n else\n {\n l1_norm[i] += 0.5 * hypre_abs(A_diag_data[j]);\n }\n }\n }\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)\n {\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n {\n l1_norm[i] += 0.5 * hypre_abs(A_offd_data[j]);\n }\n }\n }\n\n l1_norm[i] = (diag + l1_norm[i] + hypre_sqrt(diag * diag + l1_norm[i] * l1_norm[i])) * 0.5;\n }\n }\n }\n\n if (option < 5)\n {\n /* Handle negative definite matrices */\n for (i = ns; i < ne; i++)\n if (A_diag_data[A_diag_I[i]] < 0)\n {\n l1_norm[i] = -l1_norm[i];\n }\n\n for (i = ns; i < ne; i++)\n /* if (hypre_abs(l1_norm[i]) < DBL_EPSILON) */\n if (hypre_abs(l1_norm[i]) == 0.0)\n {\n hypre_error_in_arg(1);\n break;\n }\n }\n\n } #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " beta = gamma / gamma_old;\n\n /* p = s + beta p */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < local_size; j++)\n {\n p_data[j] = s_data[j] + beta * p_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_nongalerkin.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " S same nonzero structure as A */\n hypre_ParCSRMatrixCopy(A, S, 1);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_variables; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n\n /* compute scaling factor and row sum */\n row_scale = 0.0;\n row_sum = diag;\n if (num_functions > 1)\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_max(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_max(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if 
(dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_min(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_min(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n }\n else\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n row_scale = hypre_max(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n row_scale = hypre_max(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n row_scale = hypre_min(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n row_scale = hypre_min(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n\n /* compute row entries of S */\n S_diag_j[A_diag_i[i]] = -1;\n if ((hypre_abs(row_sum) > hypre_abs(diag)*max_row_sum) && (max_row_sum < 1.0))\n {\n /* make all dependencies weak */\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n S_diag_j[jA] = -1;\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n S_offd_j[jA] = -1;\n }\n }\n else\n {\n if (num_functions > 1)\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (A_diag_data[jA] <= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (A_offd_data[jA] <= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (A_diag_data[jA] >= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (A_offd_data[jA] >= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n }\n else\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (A_diag_data[jA] <= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (A_offd_data[jA] <= strength_threshold * row_scale)\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)\n {\n if (A_diag_data[jA] >= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)\n {\n if (A_offd_data[jA] >= strength_threshold * row_scale)\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_nongalerkin.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "drop-tolerance to compute new entries for sparsity pattern\n */\n /*#ifdef HYPRE_USING_OPENMP\n */\n for (i = 0; i < num_variables; i++)\n {\n global_row = i + first_col_diag_RAP;\n\n /* Compute the drop tolerance for this row, which is just\n * abs(max of row i)*droptol */\n max_entry = -1.0;\n for (j = RAP_diag_i[i]; j < 
RAP_diag_i[i + 1]; j++)\n {\n if ( (RAP_diag_j[j] != i) && (max_entry < hypre_abs(RAP_diag_data[j]) ) )\n { max_entry = hypre_abs(RAP_diag_data[j]); }\n }\n for (j = RAP_offd_i[i]; j < RAP_offd_i[i + 1]; j++)\n {\n {\n if ( max_entry < hypre_abs(RAP_offd_data[j]) )\n { max_entry = hypre_abs(RAP_offd_data[j]); }\n }\n }\n max_entry *= droptol;\n max_entry_offd = max_entry * collapse_beta;\n\n /* Loop over diag portion, adding all entries that are \"strong\" */\n for (j = RAP_diag_i[i]; j < RAP_diag_i[i + 1]; j++)\n {\n if ( hypre_abs(RAP_diag_data[j]) > max_entry )\n {\n global_col = RAP_diag_j[j] + first_col_diag_RAP;\n /*#ifdef HYPRE_USING_OPENMP\n #pragma omp critical (IJAdd)\n \n {*/\n /* For efficiency, we do a buffered IJAddToValues\n * A[global_row, global_col] += 1.0 */\n hypre_NonGalerkinIJBufferWrite(Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,\n &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols,\n global_row, global_col, 1.0);\n if (sym_collapse)\n {\n hypre_NonGalerkinIJBufferWrite(Pattern, &ijbuf_sym_cnt,\n ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,\n &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,\n global_col, global_row, 1.0);\n }\n /*}*/\n }\n }\n\n /* Loop over offd portion, adding all entries that are \"strong\" */\n for (j = RAP_offd_i[i]; j < RAP_offd_i[i + 1]; j++)\n {\n if ( hypre_abs(RAP_offd_data[j]) > max_entry_offd )\n {\n global_col = col_map_offd_RAP[ RAP_offd_j[j] ];\n /*#ifdef HYPRE_USING_OPENMP\n #pragma omp critical (IJAdd)\n \n {*/\n /* For efficiency, we do a buffered IJAddToValues\n * A[global_row, global_col] += 1.0 */\n hypre_NonGalerkinIJBufferWrite(Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,\n &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols,\n global_row, global_col, 1.0);\n if (sym_collapse)\n {\n hypre_NonGalerkinIJBufferWrite(Pattern, &ijbuf_sym_cnt,\n ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,\n &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,\n global_col, global_row, 1.0);\n }\n /*}*/\n }\n }\n\n } #pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mod_lr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],\n new_ncols_P_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mod_lr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],\n new_ncols_P_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mod_lr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],\n new_ncols_P_offd);\n 
} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "0, v);\n mult = coefs[i];\n /* u = mult * r + v */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + v_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " * r_data[j] + v_data[j];\n }\n }\n\n /* u = o + u */\n#ifdef HYPRE_USING_OPENMP\n for ( i = 0; i < num_rows; i++ )\n {\n u_data[i] = orig_u[i] + u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ixMatvec(-1.0, A, u, 0.0, tmp_vec);\n /* r = ds .* (f + tmp) */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ultiplying r by the cheby coef.*/\n\n /* o = u; u = r * coef */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n orig_u[j] = u_data[j]; /* orig, unscaled u */\n\n u_data[j] = r_data[j] * coefs[cheby_order];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " /* v = D^(-1/2)AD^(-1/2)u */\n /* tmp = ds .* u */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n tmp_data[j] = ds_data[j] * u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " mult = coefs[i];\n\n /* u = coef * r + ds .* v */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + ds_data[j] * v_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "data before adding it to u_orig*/\n\n /* u = orig_u + ds .* u */\n#ifdef HYPRE_USING_OPENMP\n for ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = orig_u[j] + ds_data[j] * u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap_communication.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", 
"context_chars": 100, "text": "_MEMORY_HOST);\n\n my_first_cpt = hypre_ParCSRMatrixColStarts(RT)[0];\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n /* removed */\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "apStart(comm_pkg,\n num_sends), HYPRE_MEMORY_HOST);*/\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n HYPRE_Int kk;\n HYPRE_Int indices[1000]; /* kludge */\n\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n\n kk = 0;\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n jj_counter++;\n indices[kk] = i1;\n 
kk++;\n }\n }\n\n hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,\n kk, indices, &P_diag_data[P_diag_i[i]]);\n\n /* Off-Diagonal part of P */\n /* undone */\n }\n }\n } #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "arker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_marker[i] = P_offd_j[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd; i++)\n {\n tmp_map_offd[i] = P_marker[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n if (num_cols_P_offd)\n {\n hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_S_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else\n {\n P_marker[i1] = 
strong_f_marker;\n }\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else\n {\n P_marker_offd[i1] = strong_f_marker;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n /* Loop over ith row of S. First, the diagonal part of S */\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += S_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n\n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of S for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++)\n {\n i2 = S_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row)\n {\n sum += S_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++)\n {\n i2 = S_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd)\n {\n sum += S_offd_data[jj1];\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = S_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of S for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++)\n {\n i2 = S_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row)\n P_diag_data[P_marker[i2]]\n += distribute * S_diag_data[jj1];\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++)\n {\n i2 = S_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd)\n P_offd_data[P_marker_offd[i2]]\n += distribute * S_offd_data[jj1];\n }\n }\n }\n else\n {\n /* do nothing */\n }\n }\n\n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the 
diagonal.\n *--------------------------------------------------------------*/\n\n else\n {\n /* do nothing */\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of S. Next, loop over the\n * off-diagonal part of S\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n\n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of S_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = S_offd_j[jj];\n\n for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++)\n {\n big_i2 = S_ext_j[jj1];\n\n if (big_i2 >= col_1 && big_i2 < col_n)\n {\n /* in the diagonal block */\n if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row)\n {\n sum += S_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd);\n if (j != -1)\n {\n if (P_marker_offd[j] >= jj_begin_row_offd)\n {\n sum += S_ext_data[jj1];\n }\n }\n\n }\n\n }\n\n if (sum != 0)\n {\n distribute = S_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of S_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n\n for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++)\n {\n big_i2 = S_ext_j[jj1];\n\n if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */\n {\n if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row)\n P_diag_data[P_marker[(HYPRE_Int)(big_i2 - col_1)]]\n += distribute * S_ext_data[jj1];\n }\n else\n {\n /* check to see if it is in the off_diagonal block */\n j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd);\n if (j != -1)\n {\n if (P_marker_offd[j] >= jj_begin_row_offd)\n P_offd_data[P_marker_offd[j]]\n += distribute * S_ext_data[jj1];\n }\n }\n }\n }\n else\n {\n /* do nothing */\n }\n }\n\n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else\n {\n /* do nothing */\n }\n\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n sum = 0.;\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n sum += P_diag_data[jj];\n }\n for (jj = jj_begin_row_offd; jj < 
jj_end_row_offd; jj++)\n {\n sum += P_offd_data[jj];\n }\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= sum;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= sum;\n }\n\n }\n\n strong_f_marker--;\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "arker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_marker[i] = P_offd_j[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_gsmg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd; i++)\n {\n tmp_map_offd[i] = P_marker[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n if (num_cols_P_offd)\n {\n hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------*/\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n //printf(\"u_old[%d] = %e\\n\",i,Vtemp_data[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------*/\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n //printf(\"u_old[%d] = %e\\n\",i,Vtemp_data[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:B_diag_num_rows) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ws = A_diag_num_rows;\n }\n else\n {\n#if !defined(_MSC_VER) && defined(HYPRE_USING_OPENMP)\n for (i = 0; i < A_diag_num_rows; i++)\n {\n B_diag_num_rows += (CF_marker[i] == point_type) ? 
1 : 0;\n } #pragma omp parallel for private(i) reduction(+:B_diag_num_rows) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------*/\n\n /* RDF: this looks a little tricky, but doable */\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a F-point, we loop through the columns and select\n * the F-columns. Also set up mapping vector.\n *--------------------------------------------------------------------*/\n\n if (col_cf_marker[i] > 0)\n {\n fine_to_coarse[i] = col_coarse_counter[j];\n col_coarse_counter[j]++;\n }\n\n if (row_cf_marker[i] > 0)\n {\n //fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (col_cf_marker[i1] > 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " // if (debug_flag==4) wall_time = time_getWallclockSeconds();\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " // if (debug_flag==4) wall_time = time_getWallclockSeconds();\n#if 0\n#ifdef HYPRE_USING_OPENMP\n // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE\n\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * 
num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n ii_counter = 0;\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a F-point, we loop through the columns and select\n * the F-columns. Also set up mapping vector.\n *--------------------------------------------------------------------*/\n if (row_cf_marker[i] > 0)\n {\n // Diagonal part of Ablock //\n Ablock_diag_i[ii_counter] = jj_counter;\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (col_cf_marker[i1] > 0)\n {\n Ablock_diag_j[jj_counter] = fine_to_coarse[i1];\n Ablock_diag_data[jj_counter] = A_diag_data[jj];\n jj_counter++;\n }\n }\n\n // Off-Diagonal part of Ablock //\n Ablock_offd_i[ii_counter] = jj_counter_offd;\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (CF_marker_offd[i1] > 0)\n {\n Ablock_offd_j[jj_counter_offd] = i1;\n Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];\n jj_counter_offd++;\n }\n }\n }\n ii_counter++;\n }\n }\n Ablock_offd_i[ii_counter] = jj_counter_offd;\n Ablock_diag_i[ii_counter] = jj_counter;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "(i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n ii_counter = 0;\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a F-point, we loop through the columns and select\n * the F-columns. 
Also set up mapping vector.\n *--------------------------------------------------------------------*/\n if (row_cf_marker[i] > 0)\n {\n // Diagonal part of Ablock //\n Ablock_diag_i[ii_counter] = jj_counter;\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (col_cf_marker[i1] > 0)\n {\n Ablock_diag_j[jj_counter] = fine_to_coarse[i1];\n Ablock_diag_data[jj_counter] = A_diag_data[jj];\n jj_counter++;\n }\n }\n\n // Off-Diagonal part of Ablock //\n Ablock_offd_i[ii_counter] = jj_counter_offd;\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (CF_marker_offd[i1] > 0)\n {\n Ablock_offd_j[jj_counter_offd] = i1;\n Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];\n jj_counter_offd++;\n }\n }\n }\n ii_counter++;\n }\n }\n Ablock_offd_i[ii_counter] = jj_counter_offd;\n Ablock_diag_i[ii_counter] = jj_counter;\n } #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n Ablock_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "= 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < Ablock_offd_size; i++)\n Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n Ablock_offd_j[i],\n num_cols_Ablock_offd);\n hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);\n }\n\n if (num_cols_Ablock_offd)\n {\n hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;\n hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "y = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < local_numrows; i++)\n {\n CF_marker_copy[i] = -CF_marker[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 2 * (send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16 * hypre_NumThreads());\n\n for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)\n {\n HYPRE_Int key = send_map_elmts_RT[i];\n hypre_UnorderedIntSetPut(&send_map_elmts_set, key);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ", 2 * send_map_elmts_unique_size,\n 16 * hypre_NumThreads());\n for (i = 0; i < send_map_elmts_unique_size; i++)\n {\n 
hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "arts_RT[num_sends_RT],\n HYPRE_MEMORY_HOST);\n\n for (i = 0; i < send_map_elmts_unique_size; i++)\n {\n send_map_elmts_starts_RT_aggregated[i] = 0;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "elmts_unique_size; i++)\n {\n send_map_elmts_starts_RT_aggregated[i] = 0;\n }\n\n for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)\n {\n HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);\n #pragma omp atomic\n send_map_elmts_starts_RT_aggregated[idx]++;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "p_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];\n\n for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)\n {\n HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);\n HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;\n send_map_elmts_RT_aggregated[offset] = i;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "&col_map_offd_Pext,\n &col_map_offd_Pext_inverse);\n\n for (i = 0 ; i < P_ext_offd_size; i++)\n //Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]);\n {\n P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_offd_RT / num_threads;\n rest = num_cols_offd_RT - size * num_threads;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n\n /*-----------------------------------------------------------------------\n * Allocate marker arrays.\n *-----------------------------------------------------------------------*/\n\n if (num_cols_offd_Pext || num_cols_diag_P)\n {\n P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P + num_cols_offd_Pext,\n HYPRE_MEMORY_HOST);\n P_marker = P_mark_array[ii];\n }\n A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);\n A_marker = A_mark_array[ii];\n 
/*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n\n jj_counter = start_indexing;\n for (ic = 0; ic < num_cols_diag_P + num_cols_offd_Pext; ic++)\n {\n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n {\n A_marker[i] = -1;\n }\n\n /*-----------------------------------------------------------------------\n * Loop over exterior c-points\n *-----------------------------------------------------------------------*/\n\n for (ic = ns; ic < ne; ic++)\n {\n\n jj_row_begining = jj_counter;\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_offd.\n *--------------------------------------------------------------------*/\n\n for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic + 1]; jj1++)\n {\n i1 = R_offd_j[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n }\n }\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. 
New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2 + num_cols_offd_A] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2 + num_cols_offd_A] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_offd.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n }\n }\n }\n }\n\n jj_count[ii] = jj_counter;\n\n } #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_offd_RT / num_threads;\n rest = num_cols_offd_RT - size * num_threads;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n if (num_cols_offd_Pext || num_cols_diag_P)\n {\n P_marker = P_mark_array[ii];\n }\n A_marker = A_mark_array[ii];\n\n jj_counter = start_indexing;\n if (ii > 0) { jj_counter = jj_count[ii - 1]; }\n\n for (ic = 0; ic < num_cols_diag_P + num_cols_offd_Pext; ic++)\n {\n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n {\n A_marker[i] = -1;\n }\n\n /*-----------------------------------------------------------------------\n * Loop over exterior c-points.\n *-----------------------------------------------------------------------*/\n\n for (ic = ns; ic < ne; ic++)\n {\n\n jj_row_begining = jj_counter;\n RAP_int_i[ic] = jj_counter;\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_offd.\n 
*--------------------------------------------------------------------*/\n\n for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic + 1]; jj1++)\n {\n i1 = R_offd_j[jj1];\n r_entry = R_offd_data[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n r_a_product = r_entry * A_offd_data[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n r_a_p_product = r_a_product * P_ext_diag_data[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_ext_offd_data[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter]\n = col_map_offd_Pext[i3 - num_cols_diag_P];\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RAP and can just add new contributions.\n *--------------------------------------------------------------*/\n\n else\n {\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n r_a_p_product = r_a_product * P_ext_diag_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_ext_offd_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n r_a_product = r_entry * A_diag_data[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2 + num_cols_offd_A] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2 + num_cols_offd_A] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n r_a_p_product = r_a_product * P_diag_data[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_offd_data[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] =\n col_map_offd_Pext[i3 - num_cols_diag_P];\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RAP and can just add new contributions.\n *--------------------------------------------------------------*/\n\n else\n {\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n r_a_p_product = r_a_product * P_diag_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_offd_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n }\n }\n if (num_cols_offd_Pext || num_cols_diag_P)\n {\n hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);\n }\n hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < RAP_ext_size; i++)\n if (RAP_ext_j[i] < first_col_diag_RAP\n || RAP_ext_j[i] > last_col_diag_RAP)\n RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n + (HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);\n#else\n +(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i], num_cols_offd_RAP);\n\n else\n {\n RAP_ext_j[i] -= first_col_diag_RAP;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_diag_RT / num_threads;\n rest = num_cols_diag_RT - size * num_threads;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n\n P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P + num_cols_offd_RAP,\n HYPRE_MEMORY_HOST);\n A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n jj_count_diag = start_indexing;\n jj_count_offd = start_indexing;\n\n for (ic = 0; ic < num_cols_diag_P + num_cols_offd_RAP; ic++)\n {\n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n {\n A_marker[i] = -1;\n }\n\n /*-----------------------------------------------------------------------\n * Loop over 
interior c-points.\n *-----------------------------------------------------------------------*/\n\n for (ic = ns; ic < ne; ic++)\n {\n\n /*--------------------------------------------------------------------\n * Set marker for diagonal entry, RAP_{ic,ic}. and for all points\n * being added to row ic of RAP_diag and RAP_offd through RAP_ext\n *--------------------------------------------------------------------*/\n\n jj_row_begin_diag = jj_count_diag;\n jj_row_begin_offd = jj_count_offd;\n\n if (square)\n {\n P_marker[ic] = jj_count_diag++;\n }\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n if (send_map_elmts_RT_inverse_map_initialized)\n {\n HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);\n if (i != -1)\n {\n for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1];\n j++)\n {\n HYPRE_Int jj = send_map_elmts_RT_aggregated[j];\n for (k = RAP_ext_i[jj]; k < RAP_ext_i[jj + 1]; k++)\n {\n jcol = (HYPRE_Int)RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n } // if (set)\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n for (i = 0; i < num_sends_RT; i++)\n for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i + 1]; j++)\n if (send_map_elmts_RT[j] == ic)\n {\n for (k = RAP_ext_i[j]; k < RAP_ext_i[j + 1]; k++)\n {\n jcol = (HYPRE_Int) RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n break;\n }\n /* !HYPRE_CONCURRENT_HOPSCOTCH */\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_diag.\n *--------------------------------------------------------------------*/\n\n for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic + 1]; jj1++)\n {\n i1 = R_diag_j[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n\n if (num_cols_offd_A)\n {\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)\n {\n i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]] + num_cols_diag_P;\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n }\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2 + num_cols_offd_A] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2 + num_cols_offd_A] = ic;\n\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_offd.\n *-----------------------------------------------------------*/\n\n if (num_cols_offd_P)\n {\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)\n {\n i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n }\n }\n\n /*--------------------------------------------------------------------\n * Set RAP_diag_i and RAP_offd_i for this row.\n *--------------------------------------------------------------------*/\n /*\n RAP_diag_i[ic] = jj_row_begin_diag;\n RAP_offd_i[ic] = jj_row_begin_offd;\n */\n }\n jj_cnt_diag[ii] = jj_count_diag;\n jj_cnt_offd[ii] = jj_count_offd;\n } #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_diag_RT / num_threads;\n rest = num_cols_diag_RT - size * num_threads;\n if (ii < rest)\n {\n ns = ii * size + ii;\n ne = (ii + 1) * size + ii + 1;\n }\n else\n {\n ns = ii * size + rest;\n ne = (ii + 1) * size + rest;\n }\n\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n for (ic = 0; ic < num_cols_diag_P + num_cols_offd_RAP; ic++)\n {\n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A ; i++)\n {\n A_marker[i] = -1;\n }\n\n jj_count_diag = start_indexing;\n jj_count_offd = start_indexing;\n if (ii > 0)\n {\n jj_count_diag = jj_cnt_diag[ii - 1];\n jj_count_offd = jj_cnt_offd[ii - 1];\n }\n\n // temporal matrix RA = R*A\n // only need to store one row per thread because R*A and (R*A)*P are fused\n // into one loop.\n hypre_CSRMatrix RA_diag, RA_offd;\n RA_diag.data = RA_diag_data_array + num_cols_diag_A * ii;\n RA_diag.j = RA_diag_j_array + num_cols_diag_A * ii;\n RA_diag.num_nonzeros = 0;\n RA_offd.num_nonzeros = 0;\n\n if (num_cols_offd_A)\n {\n RA_offd.data = RA_offd_data_array + num_cols_offd_A * ii;\n RA_offd.j = RA_offd_j_array + num_cols_offd_A * ii;\n }\n else\n {\n RA_offd.data = NULL;\n RA_offd.j = NULL;\n }\n\n /*-----------------------------------------------------------------------\n * Loop over interior c-points.\n *-----------------------------------------------------------------------*/\n\n for (ic = ns; ic < ne; ic++)\n {\n\n /*--------------------------------------------------------------------\n * Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext\n *--------------------------------------------------------------------*/\n\n jj_row_begin_diag = jj_count_diag;\n jj_row_begin_offd = jj_count_offd;\n RAP_diag_i[ic] = jj_row_begin_diag;\n RAP_offd_i[ic] = jj_row_begin_offd;\n\n HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;\n HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;\n\n if (square)\n {\n P_marker[ic] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = zero;\n RAP_diag_j[jj_count_diag] = ic;\n jj_count_diag++;\n }\n\n#ifdef 
HYPRE_CONCURRENT_HOPSCOTCH\n if (send_map_elmts_RT_inverse_map_initialized)\n {\n HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);\n if (i != -1)\n {\n for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1];\n j++)\n {\n HYPRE_Int jj = send_map_elmts_RT_aggregated[j];\n for (k = RAP_ext_i[jj]; k < RAP_ext_i[jj + 1]; k++)\n {\n jcol = (HYPRE_Int)RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n RAP_diag_data[jj_count_diag]\n = RAP_ext_data[k];\n RAP_diag_j[jj_count_diag] = jcol;\n jj_count_diag++;\n }\n else\n RAP_diag_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n RAP_offd_data[jj_count_offd]\n = RAP_ext_data[k];\n RAP_offd_j[jj_count_offd]\n = jcol - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n RAP_offd_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n }\n }\n } // if (set)\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n for (i = 0; i < num_sends_RT; i++)\n for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i + 1]; j++)\n if (send_map_elmts_RT[j] == ic)\n {\n for (k = RAP_ext_i[j]; k < RAP_ext_i[j + 1]; k++)\n {\n jcol = (HYPRE_Int)RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n RAP_diag_data[jj_count_diag]\n = RAP_ext_data[k];\n RAP_diag_j[jj_count_diag] = jcol;\n jj_count_diag++;\n }\n else\n RAP_diag_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n RAP_offd_data[jj_count_offd]\n = RAP_ext_data[k];\n RAP_offd_j[jj_count_offd]\n = jcol - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n RAP_offd_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n }\n break;\n }\n /* !HYPRE_CONCURRENT_HOPSCOTCH */\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_diag and compute row ic of RA.\n *--------------------------------------------------------------------*/\n\n for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic + 1]; jj1++)\n {\n i1 = R_diag_j[jj1];\n r_entry = R_diag_data[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n\n if (num_cols_offd_A)\n {\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n HYPRE_Real a_entry = A_offd_data[jj2];\n HYPRE_Int marker = A_marker[i2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. 
New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (marker < ra_row_begin_offd)\n {\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = RA_offd.num_nonzeros;\n RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;\n RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;\n RA_offd.num_nonzeros++;\n }\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RA and can just add new contributions.\n *--------------------------------------------------------------*/\n else\n {\n RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;\n // JSP: compiler will more likely to generate FMA instructions\n // when we don't eliminate common subexpressions of\n // r_entry * A_offd_data[jj2] manually.\n }\n } // loop over entries in row i1 of A_offd\n } // num_cols_offd_A\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n\n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n HYPRE_Real a_entry = A_diag_data[jj2];\n HYPRE_Int marker = A_marker[i2 + num_cols_offd_A];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (marker < ra_row_begin_diag)\n {\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n A_marker[i2 + num_cols_offd_A] = RA_diag.num_nonzeros;\n RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;\n RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;\n RA_diag.num_nonzeros++;\n }\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RA and can just add new contributions.\n *--------------------------------------------------------------*/\n else\n {\n RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;\n }\n } // loop over entries in row i1 of A_diag\n } // loop over entries in row ic of R_diag\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of RA_offd.\n *--------------------------------------------------------------------*/\n\n for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)\n {\n i1 = RA_offd.j[jj1 - ra_row_begin_offd];\n r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];\n\n /*-----------------------------------------------------------\n * Loop over entries in row i1 of P_ext.\n *-----------------------------------------------------------*/\n for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1 + 1]; jj2++)\n {\n i2 = P_ext_diag_j[jj2];\n HYPRE_Real p_entry = P_ext_diag_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n if (marker < jj_row_begin_diag)\n {\n P_marker[i2] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = r_a_product * p_entry;\n RAP_diag_j[jj_count_diag] = i2;\n jj_count_diag++;\n }\n else\n {\n RAP_diag_data[marker] += r_a_product * p_entry;\n }\n }\n for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1 + 1]; jj2++)\n {\n i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;\n HYPRE_Real p_entry = P_ext_offd_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n if (marker < jj_row_begin_offd)\n {\n P_marker[i2] = jj_count_offd;\n RAP_offd_data[jj_count_offd] = r_a_product * p_entry;\n RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n {\n RAP_offd_data[marker] += r_a_product * p_entry;\n }\n }\n } // loop over entries in row ic of RA_offd\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of RA_diag.\n *--------------------------------------------------------------------*/\n\n for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)\n {\n HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];\n HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of P_diag.\n *-----------------------------------------------------------------*/\n for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1 + 1]; jj2++)\n {\n i2 = P_diag_j[jj2];\n HYPRE_Real p_entry = P_diag_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (marker < jj_row_begin_diag)\n {\n P_marker[i2] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = r_a_product * p_entry;\n RAP_diag_j[jj_count_diag] = i2;\n jj_count_diag++;\n }\n else\n {\n RAP_diag_data[marker] += r_a_product * p_entry;\n }\n }\n if (num_cols_offd_P)\n {\n for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1 + 1]; jj2++)\n {\n i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;\n HYPRE_Real p_entry = P_offd_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (marker < jj_row_begin_offd)\n {\n P_marker[i2] = jj_count_offd;\n RAP_offd_data[jj_count_offd] = r_a_product * p_entry;\n RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n {\n RAP_offd_data[marker] += r_a_product * p_entry;\n }\n }\n } // num_cols_offd_P\n } // loop over entries in row ic of RA_diag.\n } // Loop over interior c-points.\n hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);\n hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "rker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_offd_RAP; i++)\n {\n P_marker[i] = -1;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ffd_RAP; i++)\n {\n P_marker[i] = -1;\n }\n\n jj_count_offd = 0;\n#ifdef HYPRE_USING_ATOMIC\n for (i = 0; i < RAP_offd_size; i++)\n {\n i3 = RAP_offd_j[i];\n#ifdef HYPRE_USING_ATOMIC\n if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)\n {\n jj_count_offd++;\n }\n#else\n if (P_marker[i3])\n {\n P_marker[i3] = 0;\n jj_count_offd++;\n }\n\n } #pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "w_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < RAP_offd_size; i++)\n {\n i3 = RAP_offd_j[i];\n RAP_offd_j[i] = P_marker[i3];\n } #pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\n for (i = begin; i < end; ++i)\n {\n int_buf_data[i - begin] =\n IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);\n#ifdef HYPRE_USING_OPENMP\n for (i = begin; i < end; ++i)\n {\n int_buf_data[i - begin] =\n IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": 
"#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\n for (i = begin; i < end; ++i)\n {\n int_buf_data[i - begin] = offset +\n (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);\n#ifdef HYPRE_USING_OPENMP\n for (i = begin; i < end; ++i)\n {\n int_buf_data[i - begin] = offset +\n (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "nt i;\n\n /* Quicker initialization */\n if (offd_n < diag_n)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < offd_n; i++)\n {\n diag_ftc[i] = -1;\n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if (diag_pm != NULL)\n {\n diag_pm[i] = -1;\n }\n if (offd_pm != NULL)\n {\n offd_pm[i] = -1;\n }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "!= NULL)\n {\n offd_pm[i] = -1;\n }\n }\n#ifdef HYPRE_USING_OPENMP\n for (i = offd_n; i < diag_n; i++)\n {\n diag_ftc[i] = -1;\n if (diag_pm != NULL)\n {\n diag_pm[i] = -1;\n }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n diag_pm[i] = -1;\n }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < diag_n; i++)\n {\n diag_ftc[i] = -1;\n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if (diag_pm != NULL)\n {\n diag_pm[i] = -1;\n }\n if (offd_pm != NULL)\n {\n offd_pm[i] = -1;\n }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "!= NULL)\n {\n offd_pm[i] = -1;\n }\n }\n#ifdef HYPRE_USING_OPENMP\n for (i = diag_n; i < offd_n; i++)\n {\n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if (offd_pm != NULL)\n {\n offd_pm[i] = -1;\n }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 2 * num_cols_A_offd,\n 16 * hypre_NumThreads());\n\n for (i = 0; i < num_cols_A_offd; i++)\n {\n hypre_UnorderedBigIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(kk,big_k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE", 
"context_chars": 100, "text": "\n /* Set column indices for Sop and A_ext such that offd nodes are\n * negatively indexed */\n for (i = 0; i < num_cols_A_offd; i++)\n {\n if (CF_marker_offd[i] < 0)\n {\n for (kk = Sop_i[i]; kk < Sop_i[i + 1]; kk++)\n {\n big_k1 = Sop_j[kk];\n if (big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))\n {\n got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);\n loc_col = got_loc + num_cols_A_offd;\n Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);\n }\n }\n for (kk = A_ext_i[i]; kk < A_ext_i[i + 1]; kk++)\n {\n big_k1 = A_ext_j[kk];\n if (big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))\n {\n got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);\n loc_col = got_loc + num_cols_A_offd;\n A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);\n }\n }\n }\n } #pragma omp parallel for private(kk,big_k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_ParCSRCommHandle *comm_handle;\n HYPRE_Int i;\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = begin; i < end; ++i)\n {\n int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "= hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < full_off_procNodes; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd is then set to the\n * total number of times P_marker is set */\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if (tmp_CF_marker_offd[index] >= 0)\n {\n P_marker[index] = 1;\n }\n } #pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " &col_map_offd_P_inverse);\n\n // find old idx -> new idx map\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < full_off_procNodes; i++)\n {\n P_marker[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n hypre_UnorderedBigIntMapDestroy(&col_map_offd_P_inverse);\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = P_marker[P_offd_j[i]];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_amgdd_fac_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; 
i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)\n {\n owned_tmp[i] = owned_u[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_amgdd_fac_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "OwnedNodes(compGrid); i++)\n {\n owned_tmp[i] = owned_u[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedNodes(compGrid); i++)\n {\n nonowned_tmp[i] = nonowned_u[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_amgdd_fac_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)\n {\n if (cf_marker[i] == relax_set)\n {\n res = owned_f[i];\n for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i + 1]; j++)\n {\n res -= hypre_CSRMatrixData(owned_diag)[j] * owned_tmp[ hypre_CSRMatrixJ(owned_diag)[j] ];\n }\n for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i + 1]; j++)\n {\n res -= hypre_CSRMatrixData(owned_offd)[j] * nonowned_tmp[ hypre_CSRMatrixJ(owned_offd)[j] ];\n }\n owned_u[i] += (relax_weight * res) / l1_norms[i];\n }\n } #pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_2s_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],\n new_ncols_P_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_2s_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_ParVectorCopy(F_array[fine_grid], Vtemp);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n u_data[i] = relax_weight[level] * v_data[i] / A_data[A_i[i]];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_ParVectorCopy(F_array[fine_grid], Vtemp);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n u_data[i] += v_data[i] / hypre_VectorData(l1_norms_lvl)[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_global; i++)\n {\n x_global[i] += D_inv[i] * r_global[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n const HYPRE_Complex di = l1_norms ? l1_norms[i] : A_diag_data[A_diag_i[i]];\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F or All ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n * Relax only C or F points as determined by relax_points.\n *-----------------------------------------------------------*/\n if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero )\n {\n res = f_data[i];\n for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i + 1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * v_ext_data[ii];\n }\n\n if (Skip_diag)\n {\n u_data[i] *= one_minus_weight;\n u_data[i] += relax_weight * res / di;\n }\n else\n {\n u_data[i] += relax_weight * res / di;\n }\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "E_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\n for (j = begin; j < end; j++)\n {\n v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "endif\n\n if ( (num_threads > 1 || !non_scale) && Vtemp_data )\n {\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_rows; j++)\n {\n Vtemp_data[j] = u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "emp_data[j] = u_data[j];\n }\n }\n\n if (num_threads > 1)\n {\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n HYPRE_Int ns, ne, sweep;\n 
hypre_partition1D(num_rows, num_threads, j, &ns, &ne);\n\n for (sweep = 0; sweep < num_sweeps; sweep++)\n {\n const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;\n const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1;\n const HYPRE_Int iend = iorder > 0 ? ne : ns - 1;\n\n if (non_scale)\n {\n hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,\n f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data,\n ns, ne, ibegin, iend, iorder, Skip_diag);\n }\n else\n {\n hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,\n f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega,\n prod, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag);\n }\n } /* for (sweep = 0; sweep < num_sweeps; sweep++) */\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_ParCSRCommHandleDestroy(comm_handle);\n comm_handle = NULL;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F or All) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n * Relax only C or F points as determined by relax_points.\n *-----------------------------------------------------------*/\n if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero )\n {\n res = f_data[i];\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * u_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * v_ext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n //fine_to_coarse[i] += my_first_cpt+coarse_shift;\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n /*#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a 
c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n }\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n\n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute * A_offd_data[jj1];\n }\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n\n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n\n }\n\n }\n\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n\n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n {\n hypre_printf(\" Warning! zero diagonal! 
Proc id %d row %d\\n\", my_id, i);\n }\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n\n }\n\n strong_f_marker--;\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n }\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n 
jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n\n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute 
* A_offd_data[jj1];\n }\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n\n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n\n }\n\n }\n\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n\n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else 
if (CF_marker_offd[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n {\n hypre_printf(\" Warning! zero diagonal! Proc id %d row %d\\n\", my_id, i);\n }\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n\n }\n\n strong_f_marker--;\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ex] == 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n for (i = 0; i < n_fine; i++)\n {\n if (CF_marker[i] == -3) { CF_marker[i] = -1; }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the 
identity. Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n /*#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n 
*--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly influence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n\n else\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute * A_offd_data[jj1];\n }\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n\n else\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n\n }\n\n }\n\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for 
private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly influence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n\n else\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute * A_offd_data[jj1];\n }\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n\n else\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n\n }\n\n }\n\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n else\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", 
"omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "er = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ex] == 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n for (i = 0; i < n_fine; i++)\n if (CF_marker[i] == -3) { CF_marker[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] > 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n /*#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n HYPRE_Int *P_marker, *P_marker_offd;\n\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n 
*--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. First, the diagonal part of A */\n sum_N_pos = 0;\n sum_N_neg = 0;\n sum_P_pos = 0;\n sum_P_neg = 0;\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (num_functions == 1 || dof_func[i1] == dof_func[i])\n {\n if (A_diag_data[jj] > 0)\n {\n sum_N_pos += A_diag_data[jj];\n }\n else\n {\n sum_N_neg += A_diag_data[jj];\n }\n }\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n if (A_diag_data[jj] > 0)\n {\n sum_P_pos += A_diag_data[jj];\n }\n else\n {\n sum_P_neg += A_diag_data[jj];\n }\n }\n }\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])\n {\n if (A_offd_data[jj] > 0)\n {\n sum_N_pos += A_offd_data[jj];\n }\n else\n {\n sum_N_neg += A_offd_data[jj];\n }\n }\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n if (A_offd_data[jj] > 0)\n {\n sum_P_pos += A_offd_data[jj];\n }\n else\n {\n sum_P_neg += A_offd_data[jj];\n }\n }\n\n }\n }\n if (sum_P_neg) { alfa = sum_N_neg / sum_P_neg / diagonal; }\n if (sum_P_pos) { beta = sum_N_pos / sum_P_pos / diagonal; }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n if (P_diag_data[jj] > 0)\n {\n P_diag_data[jj] *= -beta;\n }\n else\n {\n P_diag_data[jj] *= -alfa;\n }\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n if (P_offd_data[jj] > 0)\n {\n P_offd_data[jj] *= -beta;\n }\n else\n {\n P_offd_data[jj] *= -alfa;\n }\n }\n\n }\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n HYPRE_Int *P_marker, *P_marker_offd;\n\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an 
F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. First, the diagonal part of A */\n sum_N_pos = 0;\n sum_N_neg = 0;\n sum_P_pos = 0;\n sum_P_neg = 0;\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (num_functions == 1 || dof_func[i1] == dof_func[i])\n {\n if (A_diag_data[jj] > 0)\n {\n sum_N_pos += A_diag_data[jj];\n }\n else\n {\n sum_N_neg += A_diag_data[jj];\n }\n }\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n if (A_diag_data[jj] > 0)\n {\n sum_P_pos += A_diag_data[jj];\n }\n else\n {\n sum_P_neg += A_diag_data[jj];\n }\n }\n }\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])\n {\n if (A_offd_data[jj] > 0)\n {\n sum_N_pos += A_offd_data[jj];\n }\n else\n {\n sum_N_neg += A_offd_data[jj];\n }\n }\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n if (A_offd_data[jj] > 0)\n {\n sum_P_pos += A_offd_data[jj];\n }\n else\n {\n sum_P_neg += A_offd_data[jj];\n }\n }\n\n }\n }\n if (sum_P_neg) { alfa = sum_N_neg / sum_P_neg / diagonal; }\n if (sum_P_pos) { beta = sum_N_pos / sum_P_pos / diagonal; }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n if (P_diag_data[jj] > 0)\n {\n P_diag_data[jj] *= -beta;\n }\n else\n {\n P_diag_data[jj] *= -alfa;\n }\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n if (P_offd_data[jj] > 0)\n {\n P_offd_data[jj] *= -beta;\n }\n else\n {\n P_offd_data[jj] *= -alfa;\n }\n }\n\n }\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ex] == 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", 
"context_chars": 100, "text": "--------------*/\n\n /* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n /*#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker 
= hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n }\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n HERE, we only want to distribut to points of the SAME function type\n\n *--------------------------------------------------------------*/\n\n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0 )\n {\n sum += A_diag_data[jj1];\n }\n }\n\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute * A_offd_data[jj1];\n }\n }\n }\n\n }\n }\n else /* sum = 0 - only add to diag if the same function type */\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal. 
(only if the same function type)\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n AGAIN, we only want to distribut to points of the SAME function type\n\n *-----------------------------------------------------------*/\n\n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n\n }\n }\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n }\n else /* sum = 0 */\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n\n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n if (num_functions == 1 || 
dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n {\n hypre_printf(\" Warning! zero diagonal! Proc id %d row %d\\n\", my_id, i);\n }\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n }\n\n strong_f_marker--;\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n\n for (i = ns; i < ne; i++)\n {\n\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)\n {\n i1 = S_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n 
/*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n }\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)\n {\n i1 = S_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n }\n }\n }\n\n jj_end_row_offd = jj_counter_offd;\n\n diagonal = A_diag_data[A_diag_i[i]];\n\n\n /* Loop over ith row of A. First, the diagonal part of A */\n\n for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n HERE, we only want to distribut to points of the SAME function type\n\n *--------------------------------------------------------------*/\n\n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n sgn = 1;\n if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n\n if (P_marker[i2] >= jj_begin_row &&\n (sgn * A_diag_data[jj1]) < 0 )\n {\n sum += A_diag_data[jj1];\n }\n }\n\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n }\n\n if (sum != 0)\n {\n distribute = A_diag_data[jj] / sum;\n\n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the 
distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn * A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]]\n += distribute * A_offd_data[jj1];\n }\n }\n }\n\n }\n }\n else /* sum = 0 - only add to diag if the same function type */\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n }\n\n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal. (only if the same function type)\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func[i1])\n {\n diagonal += A_diag_data[jj];\n }\n }\n\n }\n\n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. Next, loop over the\n * off-diagonal part of A\n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n AGAIN, we only want to distribut to points of the SAME function type\n\n *-----------------------------------------------------------*/\n\n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n\n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n sgn = 1;\n if (A_ext_data[A_ext_i[c_num]] < 0) { sgn = -1; }\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1)\n {\n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n\n }\n }\n if (sum != 0)\n {\n distribute = A_offd_data[jj] / sum;\n /*---------------------------------------------------------\n * Loop over row of A_ext 
for point i1 and do\n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)\n {\n i2 = (HYPRE_Int)A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1) /* in the diagonal block */\n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn * A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd\n && (sgn * A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2 - 1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n }\n else /* sum = 0 */\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n }\n\n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n {\n diagonal += A_offd_data[jj];\n }\n }\n\n }\n }\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n {\n hypre_printf(\" Warning! zero diagonal! Proc id %d row %d\\n\", my_id, i);\n }\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n }\n\n strong_f_marker--;\n\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ex] == 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n for (i = 0; i < n_fine; i++)\n if (CF_marker[i] == -3) { CF_marker[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_ilu_setup.c", 
"omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "perm = perm;\n h_rqperm = rqperm;\n }\n\n /* Fill data */\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < n; i++)\n {\n P_diag_i[i] = i;\n P_diag_j[i] = h_perm[i];\n P_diag_data[i] = 1.0;\n\n Q_diag_i[i] = i;\n Q_diag_j[i] = h_rqperm[i];\n Q_diag_data[i] = 1.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "o_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------*/\n\n /* RDF: this looks a little tricky, but doable */\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}\n *--------------------------------------------------------------------*/\n else\n {\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if ((CF_marker[i1] >= 0) && (method > 0))\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if ((CF_marker_offd[i1] >= 0) && (method > 0))\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma 
omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "f (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);\n for (i = 0; i < n_fine; i++)\n {\n if (CF_marker[i] < 0)\n {\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if ( i == i1 ) /* diagonal of A only */\n {\n a_diag[i] = 1.0 / A_diag_data[jj];\n }\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " = 1.0 / A_diag_data[jj];\n }\n }\n }\n }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if ((CF_marker[i1] >= 0) && (method > 0))\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n /*\n if(method == 0)\n {\n P_diag_data[jj_counter] = 0.0;\n }\n */\n if (method == 1)\n {\n P_diag_data[jj_counter] = - A_diag_data[jj];\n }\n else if (method == 2)\n {\n P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];\n }\n jj_counter++;\n }\n }\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n 
/*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if ((CF_marker_offd[i1] >= 0) && (method > 0))\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n /*\n if(method == 0)\n {\n P_offd_data[jj_counter_offd] = 0.0;\n }\n */\n if (method == 1)\n {\n P_offd_data[jj_counter_offd] = - A_offd_data[jj];\n }\n else if (method == 2)\n {\n P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];\n }\n jj_counter_offd++;\n }\n }\n }\n }\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n }\n\n for (i = 0; i < n_fine; i++)\n if (CF_marker[i] == -3) { CF_marker[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "o_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------*/\n\n /* RDF: this looks a little tricky, but doable */\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}\n *--------------------------------------------------------------------*/\n else\n {\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n /*--------------------------------------------------------------------\n * Set up the indexes for the DRS method\n *--------------------------------------------------------------------*/\n\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "f (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) { coarse_shift = coarse_counter[j - 1]; }\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (j < rest)\n {\n ns = j * size + j;\n ne = (j + 1) * size + j + 1;\n }\n else\n {\n ns = j * size + rest;\n ne = (j + 1) * size + rest;\n }\n for (i = ns; i < ne; i++)\n {\n fine_to_coarse[i] += coarse_shift;\n }\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "f (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);\n for (i = 0; i < n_fine; i++)\n {\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n if ( i == i1 ) /* diagonal of A only */\n {\n a_diag[i] = 1.0 / A_diag_data[jj];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " a_diag[i] = 1.0 / A_diag_data[jj];\n }\n }\n }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine / num_threads;\n rest = n_fine - size * num_threads;\n if (jl < rest)\n {\n ns = jl * size + jl;\n ne = (jl + 1) * size + jl + 1;\n }\n else\n {\n ns = jl * size + rest;\n ne = (jl + 1) * size + rest;\n }\n jj_counter = 
0;\n if (jl > 0) { jj_counter = jj_count[jl - 1]; }\n jj_counter_offd = 0;\n if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);\n if (num_cols_A_offd)\n {\n P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n }\n else\n {\n P_marker_offd = NULL;\n }\n\n for (i = 0; i < n_fine; i++)\n {\n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker_offd[i] = -1;\n }\n for (i = ns; i < ne; i++)\n {\n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n else\n {\n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];\n\n jj_counter++;\n }\n }\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];\n\n jj_counter_offd++;\n }\n }\n }\n }\n P_offd_i[i + 1] = jj_counter_offd;\n }\n hypre_TFree(P_marker, HYPRE_MEMORY_HOST);\n hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);\n } #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "pre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols_A_offd; i++)\n {\n P_marker[i] = 0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/parcsr_ls/par_mgr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 0) { index++; }\n tmp_map_offd[i] = index++;\n }\n\n#if 0\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < P_offd_size; i++)\n {\n P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,\n P_offd_j[i],\n num_cols_P_offd);\n } #pragma omp parallel for private(i) 
HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "h = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < num_buckets; ++i)\n {\n s->hopInfo[i] = 0;\n s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < num_buckets; ++i)\n {\n s->hopInfo[i] = 0;\n s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lloc(hypre_HopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < num_buckets; i++)\n {\n InitBucket(&m->table[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "c(hypre_BigHopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < num_buckets; i++)\n {\n InitBigBucket(&m->table[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/int_array.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "HYPRE_Int size = hypre_IntArraySize(v);\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n array_data[i] = value;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/int_array.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "HYPRE_Int size = hypre_IntArraySize(v);\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n array_data[i] = i % cycle;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/int_array.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:num_values) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_Int num_values = 0;\n HYPRE_Int i;\n\n#if !defined(_MSC_VER) && defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n num_values += (array_data[i] == value) ? 
1 : 0;\n } #pragma omp parallel for private(i) reduction(+:num_values) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/int_array.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "HYPRE_Int *w_data = hypre_IntArrayData(w);\n\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n w_data[v_data[i]] = i;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/int_array.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " hypre_IntArrayNegateDevice(v);\n }\n else\n#endif\n {\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n array_data[i] = - array_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/merge_sort.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "nverse_map, 2 * len, 16 * hypre_NumThreads());\n HYPRE_Int i;\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < len; i++)\n {\n HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);\n hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);\n#ifdef DBG_MERGE_SORT\n if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)\n {\n fprintf(stderr, \"%d %d\\n\", i, (*out)[i]);\n hypre_assert(false);\n }\n\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/utilities/merge_sort.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "nverse_map, 2 * len, 16 * hypre_NumThreads());\n HYPRE_Int i;\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n for (i = 0; i < len; i++)\n {\n HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i);\n hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);\n#ifdef DBG_MERGE_SORT\n if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)\n {\n fprintf(stderr, \"%d %d\\n\", i, (*out)[i]);\n hypre_assert(false);\n }\n\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/sstruct_ls/maxwell_zeroBC.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ectorData(v);\n HYPRE_Int i;\n HYPRE_Int ierr = 0;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < nrows; i++)\n {\n vector_data[rows[i]] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "square */\n if (num_rows_A != num_cols_A)\n {\n return -1;\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nnzrows_A; i++)\n {\n ii = rownnz_A ? 
rownnz_A[i] : i;\n for (j = A_i[ii]; j < A_i[ii + 1]; j++)\n {\n if (A_j[j] == ii)\n {\n if (j != A_i[ii])\n {\n hypre_swap(A_j, A_i[ii], j);\n hypre_swap_c(A_data, A_i[ii], j);\n }\n break;\n }\n }\n } #pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " num_nonzeros = hypre_CSRMatrixNumNonzeros(A);\n HYPRE_Int i;\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; i++)\n {\n sum += data[i];\n } #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "YPRE_Complex sum = 0;\n\n hypre_assert(num_nonzeros == A_i[nrows]);\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_nonzeros; ++i)\n {\n HYPRE_Complex v = A_data[i];\n sum += v * v;\n } #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, sl, sr) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "PRE_Complex sl;\n HYPRE_Complex sr;\n\n if (ldata && rdata)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nrows; i++)\n {\n sl = ldata[i];\n for (j = A_i[i]; j < A_i[i + 1]; j++)\n {\n sr = rdata[A_j[j]];\n A_data[j] = sl * A_data[j] * sr;\n }\n } #pragma omp parallel for private(i, j, sl, sr) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, sl) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "[j] * sr;\n }\n }\n }\n else if (ldata && !rdata)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nrows; i++)\n {\n sl = ldata[i];\n for (j = A_i[i]; j < A_i[i + 1]; j++)\n {\n A_data[j] = sl * A_data[j];\n }\n } #pragma omp parallel for private(i, j, sl) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, sr) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_data[j];\n }\n }\n }\n else if (!ldata && rdata)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < nrows; i++)\n {\n for (j = A_i[i]; j < A_i[i + 1]; j++)\n {\n sr = rdata[A_j[j]];\n A_data[j] = A_data[j] * sr;\n }\n } #pragma omp parallel for private(i, j, sr) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int total_size = size * num_vectors;\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < total_size; i++)\n {\n vector_data[i] = value;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ce(x, istride, ostride, size, data);\n }\n else\n#endif\n {\n#if 
defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i += istride)\n {\n x_data[(i / istride) * ostride] = data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int total_size = size * num_vectors;\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < total_size; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int total_size = size * num_vectors;\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < total_size; i++)\n {\n y_data[i] += alpha * x_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int total_size = size * num_vectors;\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < total_size; i++)\n {\n z_data[i] = alpha * x_data[i] + beta * y_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " num_vectors_y == 1)\n {\n if (marker)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n if (marker[i] == marker_val)\n {\n y_data[i] += x_data[i] / b_data[i];\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n y_data[i] += x_data[i] / b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " num_vectors_y == 2)\n {\n if (marker)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n val = 1.0 / b_data[i];\n if (marker[i] == marker_val)\n {\n y_data[i] += x_data[i] * val;\n y_data[i + size] += x_data[i + size] * val;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n val = 1.0 / b_data[i];\n\n y_data[i] += x_data[i] * val;\n y_data[i + size] += x_data[i + size] * val;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) 
HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "s_x == num_vectors_y)\n {\n if (marker)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n val = 1.0 / b_data[i];\n if (marker[i] == marker_val)\n {\n for (j = 0; j < num_vectors_x; j++)\n {\n y_data[i + size * j] += x_data[i + size * j] * val;\n }\n }\n } #pragma omp parallel for private(i, j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < size; i++)\n {\n val = 1.0 / b_data[i];\n for (j = 0; j < num_vectors_x; j++)\n {\n y_data[i + size * j] += x_data[i + size * j] * val;\n }\n } #pragma omp parallel for private(i, j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ors;\n\n HYPRE_Real result = 0.0;\n HYPRE_Int i;\n\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < total_size; i++)\n {\n result += hypre_conj(y_data[i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ze * num_vectors;\n\n HYPRE_Complex sum = 0;\n HYPRE_Int i;\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < total_size; i++)\n {\n sum += data[i];\n } #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "stribute parallel for private(i) is_device_ptr(y_data, x_data)\n#elif defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += hypre_max(alpha * x_data[i], beta * y_data[i]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] = beta * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "------------------------------*/\n\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "i] = 0.0;\n }\n }\n else if (temp == 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; 
i++)\n {\n y_data[i] = b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "data[i];\n }\n }\n else if (temp == -1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] = -b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " y_data[i] = -b_data[i];\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] = temp * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,m) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " switch (num_vectors)\n {\n case 2:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n HYPRE_Complex tmp[2] = {0.0, 0.0};\n for (jj = A_i[m]; jj < A_i[m + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n }\n HYPRE_Int yidx = m * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n } #pragma omp parallel for private(i,j,jj,m) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,m) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n case 3:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n HYPRE_Complex tmp[3] = {0.0, 0.0, 0.0};\n for (jj = A_i[m]; jj < A_i[m + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n tmp[2] += coef * x_data[xidx + 2 * vecstride_x];\n }\n HYPRE_Int yidx = m * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n y_data[yidx + 2 * vecstride_y] += tmp[2];\n } #pragma omp parallel for private(i,j,jj,m) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,m) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n case 4:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n HYPRE_Complex tmp[4] = {0.0, 0.0, 0.0, 0.0};\n for (jj = A_i[m]; jj < A_i[m + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n tmp[2] += coef * x_data[xidx + 2 * vecstride_x];\n tmp[3] += coef * x_data[xidx + 3 * vecstride_x];\n }\n HYPRE_Int yidx = m * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n y_data[yidx + 2 * vecstride_y] += tmp[2];\n y_data[yidx + 3 * vecstride_y] += tmp[3];\n } #pragma omp parallel for private(i,j,jj,m) 
HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n default:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n for (j = 0; j < num_vectors; j++)\n {\n tempx = 0.0;\n for (jj = A_i[m]; jj < A_i[m + 1]; jj++)\n {\n tempx += A_data[jj] * x_data[j * vecstride_x + A_j[jj] * idxstride_x];\n }\n y_data[j * vecstride_y + m * idxstride_y] += tempx;\n }\n } #pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " switch (num_vectors)\n {\n case 2:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n HYPRE_Complex tmp[2] = {0.0, 0.0};\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n }\n HYPRE_Int yidx = i * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n } #pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n case 3:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n HYPRE_Complex tmp[3] = {0.0, 0.0, 0.0};\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n tmp[2] += coef * x_data[xidx + 2 * vecstride_x];\n }\n HYPRE_Int yidx = i * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n y_data[yidx + 2 * vecstride_y] += tmp[2];\n } #pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n case 4:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n HYPRE_Complex tmp[4] = {0.0, 0.0, 0.0, 0.0};\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n {\n HYPRE_Int xidx = A_j[jj] * idxstride_x;\n HYPRE_Complex coef = A_data[jj];\n\n tmp[0] += coef * x_data[xidx];\n tmp[1] += coef * x_data[xidx + vecstride_x];\n tmp[2] += coef * x_data[xidx + 2 * vecstride_x];\n tmp[3] += coef * x_data[xidx + 3 * vecstride_x];\n }\n HYPRE_Int yidx = i * idxstride_y;\n\n y_data[yidx] += tmp[0];\n y_data[yidx + vecstride_y] += tmp[1];\n y_data[yidx + 2 * vecstride_y] += tmp[2];\n y_data[yidx + 3 * vecstride_y] += tmp[3];\n } #pragma omp parallel for private(i,j,jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n break;\n\n default:\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n for (j = 0; j < num_vectors; 
++j)\n {\n tempx = 0.0;\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n {\n tempx += A_data[jj] * x_data[j * vecstride_x + A_j[jj] * idxstride_x];\n }\n y_data[j * vecstride_y + i * idxstride_y] += tempx;\n }\n } #pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-----------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows * num_vectors; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "is smaller than xpar*num_rows */\n\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "a[i] = 0.0;\n }\n\n if (alpha == 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] = tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " } // y = A*x\n else if (alpha == -1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx -= A_data[j] * x_data[A_j[j]];\n }\n y_data[m] = tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } // y = -A*x\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] = alpha * tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " beta == -alpha\n {\n if (alpha == 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = -b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n y_data[i] = -b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < 
num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " } // y = A*x - b\n else if (alpha == -1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n y_data[i] = b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] -= tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } // y = -A*x + b\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = -alpha * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " y_data[i] = -alpha * b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += alpha * tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "/ beta == alpha\n {\n if (alpha == 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n y_data[i] = b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " } // y = A*x + b\n else if (alpha == -1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = -b_data[i];\n } #pragma omp parallel for 
private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n y_data[i] = -b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx -= A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } // y = -A*x - b\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = alpha * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n y_data[i] = alpha * b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += alpha * tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n else\n {\n if (alpha == 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = beta * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n y_data[i] = beta * b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "// y = A*x + beta*b\n else if (-1 == alpha)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = -temp * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n y_data[i] = -temp * b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx -= A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } // y = -A*x - temp*b\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n y_data[i] = beta * b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n y_data[i] = beta * b_data[i];\n }\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n tempx = 0.0;\n for (j = A_i[m]; j < A_i[m + 1]; j++)\n {\n tempx += A_data[j] * x_data[A_j[j]];\n }\n y_data[m] += alpha * tempx;\n } #pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++)\n {\n y_data[i] *= beta;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++)\n {\n y_data[i] = 0.0;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " y_data[i] = 0.0;\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++)\n {\n y_data[i] *= temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_cols * num_vectors; i++)\n {\n y_data[i] *= alpha;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) { y_data[i] *= beta; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n if 
(CF_marker_x[i] == fpt) { y_data[i] = 0.0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ker_x[i] == fpt) { y_data[i] = 0.0; }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) { y_data[i] *= temp; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i + 1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) { temp += A_data[jj] * x_data[A_j[jj]]; }\n y_data[i] = temp;\n }\n } #pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) { y_data[i] *= alpha; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 0; j < k - 7; j += 8)\n {\n jstart = j * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[j] * x_data[jstart + i] + alpha[j + 1] * x_data[jstart + i + size]\n + alpha[j + 2] * x_data[(j + 2) * size + i] + alpha[j + 3] * x_data[(j + 3) * size + i]\n + alpha[j + 4] * x_data[(j + 4) * size + i] + alpha[j + 5] * x_data[(j + 5) * size + i]\n + alpha[j + 6] * x_data[(j + 6) * size + i] + alpha[j + 7] * x_data[(j + 7) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n }\n if (restk == 1)\n {\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 1] * x_data[jstart + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 2)\n {\n jstart = (k - 2) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 2] * x_data[jstart + i] + alpha[k - 1] * x_data[jstart + size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) 
HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 3)\n {\n jstart = (k - 3) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 3] * x_data[jstart + i] + alpha[k - 2] * x_data[jstart + size + i] + alpha[k\n - 1] *\n x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 4)\n {\n jstart = (k - 4) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 4] * x_data[(k - 4) * size + i] + alpha[k - 3] * x_data[(k - 3) * size + i]\n + alpha[k - 2] * x_data[(k - 2) * size + i] + alpha[k - 1] * x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "k - 1) * size + i];\n }\n }\n else if (restk == 5)\n {\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += + alpha[k - 5] * x_data[(k - 5) * size + i] + alpha[k - 4] * x_data[(k - 4) * size + i]\n + alpha[k - 3] * x_data[(k - 3) * size + i] + alpha[k - 2] * x_data[(k - 2) * size + i]\n + alpha[k - 1] * x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 6)\n {\n jstart = (k - 6) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 6] * x_data[jstart + i] + alpha[k - 5] * x_data[jstart + i + size]\n + alpha[k - 4] * x_data[(k - 4) * size + i] + alpha[k - 3] * x_data[(k - 3) * size + i]\n + alpha[k - 2] * x_data[(k - 2) * size + i] + alpha[k - 1] * x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 7)\n {\n jstart = (k - 7) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 7] * x_data[jstart + i] + alpha[k - 6] * x_data[jstart + i + size]\n + alpha[k - 5] * x_data[(k - 5) * size + i] + alpha[k - 4] * x_data[(k - 4) * size + i]\n + alpha[k - 3] * x_data[(k - 3) * size + i] + alpha[k - 2] * x_data[(k - 2) * size + i]\n + alpha[k - 1] * x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " 0; j < k - 3; j += 4)\n {\n jstart = j * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[j] * x_data[jstart + i] + alpha[j + 1] * x_data[jstart + i + size]\n + alpha[j + 2] * x_data[(j + 2) * size + i] + alpha[j + 3] * x_data[(j + 
3) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n }\n if (restk == 1)\n {\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 1] * x_data[jstart + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 2)\n {\n jstart = (k - 2) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 2] * x_data[jstart + i] + alpha[k - 1] * x_data[jstart + size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "}\n else if (restk == 3)\n {\n jstart = (k - 3) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[k - 3] * x_data[jstart + i] + alpha[k - 2] * x_data[jstart + size + i] + alpha[k\n - 1] *\n x_data[(k - 1) * size + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "or (j = 0; j < k; j++)\n {\n jstart = j * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n y_data[i] += alpha[j] * x_data[jstart + i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7,res8) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "tart6 = jstart5 + size;\n jstart7 = jstart6 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];\n res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];\n res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];\n res8 += hypre_conj(y_data[jstart7 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7,res8) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "stk == 1)\n {\n res1 = 0;\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart = (k - 2) * size;\n jstart1 = jstart + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart1 = jstart + size;\n jstart2 = jstart1 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart2 = jstart1 + size;\n jstart3 = jstart2 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart3 = jstart2 + size;\n jstart4 = jstart3 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart4 = jstart3 + size;\n jstart5 = jstart4 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];\n res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart5 = jstart4 + size;\n jstart6 = jstart5 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];\n res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];\n res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "tart2 = jstart1 + size;\n jstart3 = jstart2 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "stk == 1)\n {\n res1 = 0;\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart = (k - 2) * size;\n jstart1 = jstart + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart1 = jstart + size;\n jstart2 = jstart1 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res1 += hypre_conj(y_data[jstart + i]) * x_data[i];\n res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];\n res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) 
reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_x8,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7,res_y8) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "tart6 = jstart5 + size;\n jstart7 = jstart6 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];\n res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];\n res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];\n res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];\n res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];\n res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];\n res_x8 += hypre_conj(z_data[jstart7 + i]) * x_data[i];\n res_y8 += hypre_conj(z_data[jstart7 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_x8,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7,res_y8) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " res_x1 = 0;\n res_y1 = 0;\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart = (k - 2) * size;\n jstart1 = jstart + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart1 = jstart + size;\n jstart2 = jstart1 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart2 = jstart1 + size;\n jstart3 = jstart2 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_y1,res_y2,res_y3,res_y4,res_y5) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart3 = jstart2 + size;\n jstart4 = jstart3 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];\n res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_y1,res_y2,res_y3,res_y4,res_y5) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart4 = jstart3 + size;\n jstart5 = jstart4 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];\n res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];\n res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];\n res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart5 = jstart4 + size;\n jstart6 = jstart5 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];\n res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];\n res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];\n res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];\n res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];\n res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "tart2 = jstart1 + size;\n jstart3 = jstart2 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];\n res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " res_x1 = 0;\n res_y1 = 0;\n jstart = (k - 1) * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart = (k - 2) * size;\n jstart1 = jstart + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n 
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " jstart1 = jstart + size;\n jstart2 = jstart1 + size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];\n res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];\n res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];\n res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];\n res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "j++)\n {\n res = 0;\n jstart = j * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res += hypre_conj(y_data[jstart + i]) * x_data[i];\n } #pragma omp parallel for private(i) reduction(+:res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/seq_mv/vector_batched.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:res_x,res_y) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " res_y = 0; //result_y[j];\n jstart = j * size;\n#if defined(HYPRE_USING_OPENMP)\n for (i = 0; i < size; i++)\n {\n res_x += hypre_conj(z_data[jstart + i]) * x_data[i];\n res_y += hypre_conj(z_data[jstart + i]) * y_data[i];\n } #pragma omp parallel for private(i) reduction(+:res_x,res_y) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/chiao45/mgmetis/mgmetis/src/metis/GKlib/csr.c", "omp_pragma_line": "#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)", "context_chars": 100, "text": "eak;\n default:\n gk_errexit(SIGERR, \"Invalid sum type of %d.\\n\", what);\n return;\n }\n\n for (i=0; inrows;\n ptr = mat->rowptr;\n val = mat->rowval;\n\n if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);\n\n norms = mat->rnorms = gk_fsmalloc(n, 0, \"gk_csr_ComputeSums: norms\");\n break;\n case GK_CSR_COL:\n n = mat->ncols;\n ptr = mat->colptr;\n val = mat->colval;\n\n if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);\n\n norms = mat->cnorms = gk_fsmalloc(n, 0, \"gk_csr_ComputeSums: norms\");\n break;\n default:\n gk_errexit(SIGERR, \"Invalid norm type of %d.\\n\", what);\n return;\n }\n\n #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)\n for (i=0; i #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/chiao45/mgmetis/mgmetis/src/metis/GKlib/csr.c", "omp_pragma_line": "#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)", "context_chars": 100, "text": "ak;\n default:\n gk_errexit(SIGERR, \"Invalid norm type of %d.\\n\", what);\n return;\n }\n\n for (i=0; irowptr)\n gk_errexit(SIGERR, \"Row-based view of the matrix does not exists.\\n\");\n nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];\n nind2 = 
mat->rowptr[i2+1]-mat->rowptr[i2];\n ind1 = mat->rowind + mat->rowptr[i1];\n ind2 = mat->rowind + mat->rowptr[i2];\n val1 = mat->rowval + mat->rowptr[i1];\n val2 = mat->rowval + mat->rowptr[i2];\n break;\n\n case GK_CSR_COL:\n if (!mat->colptr)\n gk_errexit(SIGERR, \"Column-based view of the matrix does not exists.\\n\");\n nind1 = mat->colptr[i1+1]-mat->colptr[i1];\n nind2 = mat->colptr[i2+1]-mat->colptr[i2];\n ind1 = mat->colind + mat->colptr[i1];\n ind2 = mat->colind + mat->colptr[i2];\n val1 = mat->colval + mat->colptr[i1];\n val2 = mat->colval + mat->colptr[i2];\n break;\n\n default:\n gk_errexit(SIGERR, \"Invalid index type of %d.\\n\", what);\n return 0.0;\n }\n\n\n switch (simtype) {\n case GK_CSR_COS:\n case GK_CSR_JAC:\n sim = stat1 = stat2 = 0.0;\n i1 = i2 = 0;\n while (i1 ind2[i2]) {\n stat2 += val2[i2]*val2[i2];\n i2++;\n }\n else {\n sim += val1[i1]*val2[i2];\n stat1 += val1[i1]*val1[i1];\n stat2 += val2[i2]*val2[i2];\n i1++;\n i2++;\n }\n }\n if (simtype == GK_CSR_COS)\n sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);\n else \n sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);\n break;\n\n case GK_CSR_MIN:\n sim = stat1 = stat2 = 0.0;\n i1 = i2 = 0;\n while (i1 ind2[i2]) {\n stat2 += val2[i2];\n i2++;\n }\n else {\n sim += gk_min(val1[i1],val2[i2]);\n stat1 += val1[i1];\n stat2 += val2[i2];\n i1++;\n i2++;\n }\n }\n sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);\n\n break;\n\n case GK_CSR_AMIN:\n sim = stat1 = stat2 = 0.0;\n i1 = i2 = 0;\n while (i1 ind2[i2]) {\n stat2 += val2[i2];\n i2++;\n }\n else {\n sim += gk_min(val1[i1],val2[i2]);\n stat1 += val1[i1];\n stat2 += val2[i2];\n i1++;\n i2++;\n }\n }\n sim = (stat1 > 0.0 ? sim/stat1 : 0.0);\n\n break;\n\n default:\n gk_errexit(SIGERR, \"Unknown similarity measure %d\\n\", simtype);\n return -1;\n }\n\n return sim;\n\n} #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_polarim_Params.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "double *Mdelta_in,\n int *idx_in, int *numel_in )\n{\n int m = 16;\n\tfor (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_AIW.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " , double *I_in , double *W_in ,\n int *idx_in, int *numel_in )\n{\n int m = 16;\n\tfor (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_AIW.c", "omp_pragma_line": "#pragma omp parallel for (parallel)", "context_chars": 100, "text": "+3] , &M_out[idx_in[i]*m+7] , &M_out[idx_in[i]*m+11] , &M_out[idx_in[i]*m+15] );\n\n } // End of #pragma omp parallel for \n for (int i=0; i #pragma omp parallel for (parallel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_AIW.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "dx_in[i]*m+11] , &M_out[idx_in[i]*m+15] );\n\n } // End of #pragma omp parallel for (parallel)\n for (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_eig_REls.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ueller 
Matrix coefficients as input! i.e. m11 equal to 1.0 everywhere\n int l = 4; \n int m = 16;\n for (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/test_openMP.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "openMP()\n{\n printf(\" Testing parallel-computing (openMP) libraries:... \\n\\n\");\n printf(\" >> \");\n for (int i=0; i<10; ++i)\n {\n printf(\"%d \",i);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_det.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " *NORMALISED* Mueller Matrix coefficients as input! i.e. m11 equal to 1.0 everywhere\n int m = 16;\n\tfor (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/stefanomoriconi/libmpMuelMat/C-libs/mp_comp_MM_pol_LuChipman.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "*NORMALISED* Mueller Matrix coefficients as input! i.e. m11 equal to 1.0 everywhere\n int m = 16;\n for (int i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NJU-TJL/OpenMP-MPI_Labs/Lab02/OpenMP/LU_OpenMP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/计算L、U矩阵\t\n\tfor (int i = 0; i < N; i++) {\n\t\tU[i][i] = A[i][i] - sum_i_j_K(i, i, i);\n\t\tL[i][i] = 1;\n\t\tfor (int j = i+1; j < N; j++) {\n\t\t\t//按照递推公式进行计算\n\t\t\tU[i][j] = A[i][j] - sum_i_j_K(i, j, i);\n\t\t\tL[j][i] = (A[j][i] - sum_i_j_K(j, i, i)) / U[i][i];\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NJU-TJL/OpenMP-MPI_Labs/Lab01/OpenMP/MatrixMtp_OpenMP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "线程数\n omp_set_num_threads(n_threads);\n\n //计时开始\n double ts = omp_get_wtime();\n\n //计算C\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n for (int k = 0; k < n; k++) {\n C[i][j] += A[i][k] * B[k][j];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NJU-TJL/OpenMP-MPI_Labs/Lab03/OpenMP/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ARG], &filenames);\n // 分配存放所有文件的文档向量的空间\n vectors = (int **)calloc(file_count, sizeof(int *));\nfor (int i = 0; i < file_count; ++i) {\n vectors[i] = (int *)calloc(dict_size, sizeof(int));\n // 读取文件并生成文档向量\n make_profile(filenames[i], dict_size, vectors[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_funs.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "olumns vector\n double * ssc;\n if(!rank) {\n ssc = malloc(m.w * sizeof(double));\n for(int i = 0; i < m.w; i++)\n ssc[i] = block_sum_col_sqr(m.data + i, m.h, m.w);\n }\n\n // Sequential section, faster on some setups\n #ifdef SEQ_SQR\n if(rank) return m;\n matrix mt = new_matrix(m.w, m.w);\n double done = 1.0, dzero = 0.0;\n dsyrk_(\"L\", \"N\", &m.w, &m.h, &done, m.data, &m.w, &dzero, mt.data, &m.w);\n #else\n // Distribute m\n MPI_Bcast(&m, sizeof(matrix), MPI_BYTE, 0, comm);\n int nb = BLOCK_SIZE, izero = 0, ione = 1, info;\n int mp = numroc_(&m.w, &nb, &blacs_row, &izero, &blacs_height);\n int nq = numroc_(&m.h, &nb, &blacs_col, &izero, &blacs_width);\n 
matrix m_local = new_matrix(mp, nq);\n gmc_distribute(m.w, m.h, m.data, m_local.data, rank, blacs_width, blacs_height, nb, comm);\n \n arr_desc mlocald, mtlocald;\n int lld_local = mp > 1 ? mp : 1;\n descinit_(&mlocald, &m.w, &m.h, &nb, &nb, &izero, &izero, &blacs_ctx, &lld_local, &info);\n nq = numroc_(&m.w, &nb, &blacs_col, &izero, &blacs_width);\n descinit_(&mtlocald, &m.w, &m.w, &nb, &nb, &izero, &izero, &blacs_ctx, &lld_local, &info);\n \n // Compute multiplication by transpose (upper triangular only)\n matrix mt_local = new_matrix(mp, nq);\n double done = 1.0, dzero = 0.0;\n pdsyrk_(\"L\", \"N\", &m.w, &m.h, &done, m_local.data, &ione, &ione, &mlocald, &dzero, mt_local.data, &ione, &ione, &mtlocald);\n free_matrix(m_local);\n\n // Collect mt\n matrix mt;\n if(!rank) mt = new_matrix(m.w, m.w);\n gmc_collect(m.w, m.w, mt_local.data, mt.data, rank, blacs_width, blacs_height, nb, comm);\n free_matrix(mt_local);\n\n // Workers can return here\n if(rank) return mt;\n \n\n // Compute final matrix\n #pragma omp parallel for\n for(long long i = 0; i < m.w; i++) {\n mt.data[i * m.w + i] = 0.0;\n for(long long j = i + 1; j < m.w; j++) {\n double mul = mt.data[i * m.w + j];\n mt.data[j * m.w + i] = mt.data[i * m.w + j] = ssc[i] + ssc[j] - 2.0 * mul;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_funs.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " // Workers can return here\n if(rank) return mt;\n #endif\n\n // Compute final matrix\n for(long long i = 0; i < m.w; i++) {\n mt.data[i * m.w + i] = 0.0;\n for(long long j = i + 1; j < m.w; j++) {\n double mul = mt.data[i * m.w + j];\n mt.data[j * m.w + i] = mt.data[i * m.w + j] = ssc[i] + ssc[j] - 2.0 * mul;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_scale.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "PI_Bcast(&w, 1, MPI_INT, 0, comm);\n MPI_Bcast(&h, 1, MPI_INT, 0, comm);\n\n if(!rank) {\n for(int r = 0; r < numprocs; r++) {\n // Dimensions of r's local matrix\n int blacs_col = r / blacs_height;\n int blacs_row = r % blacs_height;\n long long mp = numroc_(&w, &nb, &blacs_row, &izero, &blacs_height);\n long long nq = numroc_(&h, &nb, &blacs_col, &izero, &blacs_width);\n long long numbytes = mp * nq * sizeof(double);\n\n // Set up block-cyclic distribution start\n long long y = nb * blacs_col, x = (long long) (nb * blacs_row) * sizeof(double);\n int y_blk = 0, x_blk = 0;\n if(!r) {\n // Self: copy into local buffer\n _copy_cyclic(w, h, nb, blacs_width, blacs_height, blacs_col, blacs_row, a, b, LLONG_MAX, &y, &y_blk, &x, &x_blk);\n continue;\n }\n\n // Send to process: split the message in chunks to control max size\n double * buf = malloc(_minl(numbytes, MAX_MPI_MSG_BYTES));\n for(long long pos = 0; pos < numbytes; pos += MAX_MPI_MSG_BYTES) {\n _copy_cyclic(w, h, nb, blacs_width, blacs_height, blacs_col, blacs_row, a, buf, MAX_MPI_MSG_BYTES, &y, &y_blk, &x, &x_blk);\n long long amt = _minl(numbytes - pos, MAX_MPI_MSG_BYTES);\n MPI_Send(buf, (int) amt, MPI_BYTE, r, 2711, comm);\n }\n free(buf);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_scale.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "PI_Bcast(&w, 1, MPI_INT, 0, comm);\n MPI_Bcast(&h, 1, MPI_INT, 0, comm);\n\n if(!rank) {\n for(int r = 0; r < numprocs; 
r++) {\n // Dimensions of r's local matrix\n int blacs_col = r / blacs_height;\n int blacs_row = r % blacs_height;\n long long mp = numroc_(&w, &nb, &blacs_row, &izero, &blacs_height);\n long long nq = numroc_(&h, &nb, &blacs_col, &izero, &blacs_width);\n long long numbytes = mp * nq * sizeof(double);\n\n // Set up block-cyclic distribution start\n long long y = nb * blacs_col, x = (long long) (nb * blacs_row) * sizeof(double);\n int y_blk = 0, x_blk = 0;\n if(!r) {\n // Self: place values from local buffer into global matrix\n _fill_cyclic(w, h, nb, blacs_width, blacs_height, blacs_col, blacs_row, a, b, LLONG_MAX, &y, &y_blk, &x, &x_blk);\n continue;\n }\n\n // Receive from process: receive matching chunks and place them into global matrix\n double * buf = malloc(_minl(numbytes, MAX_MPI_MSG_BYTES));\n for(long long pos = 0; pos < numbytes; pos += MAX_MPI_MSG_BYTES) {\n long long amt = _minl(numbytes - pos, MAX_MPI_MSG_BYTES);\n MPI_Recv(buf, (int) amt, MPI_BYTE, r, 2712, comm, MPI_STATUS_IGNORE);\n _fill_cyclic(w, h, nb, blacs_width, blacs_height, blacs_col, blacs_row, buf, b, MAX_MPI_MSG_BYTES, &y, &y_blk, &x, &x_blk);\n }\n free(buf);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_scale.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "I_Bcast(&w, 1, MPI_LONG, 0, comm);\n MPI_Bcast(&h, 1, MPI_INT, 0, comm);\n\n if(!rank) {\n for(int r = 0; r < numprocs; r++) {\n // Rows assigned to process\n long long numrows = h / numprocs + (r < h % numprocs);\n long long numbytes = numrows * w;\n if(!r) {\n // Self: copy directly into local\n memcpy(b, a, numbytes);\n continue;\n }\n\n // Send to process: split message in chunks\n void * offset = a + ((long long) (h / numprocs) * (long long) r + _minl(h % numprocs, r)) * w;\n for(long long pos = 0; pos < numbytes; pos += MAX_MPI_MSG_BYTES) {\n long long amt = _minl(numbytes - pos, MAX_MPI_MSG_BYTES);\n MPI_Send(offset + pos, (int) amt, MPI_BYTE, r, 2713, comm);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc_scale.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "I_Bcast(&w, 1, MPI_LONG, 0, comm);\n MPI_Bcast(&h, 1, MPI_INT, 0, comm);\n\n if(!rank) {\n for(int r = 0; r < numprocs; r++) {\n // Rows assigned to process\n long long numrows = h / numprocs + (r < h % numprocs);\n long long numbytes = numrows * w;\n if(!r) {\n // Self: copy directly from local\n memcpy(b, a, numbytes);\n continue;\n }\n\n // Receive matching chunks from process\n void * offset = b + ((long long) (h / numprocs) * (long long) r + _minl(h % numprocs, r)) * w;\n for(long long pos = 0; pos < numbytes; pos += MAX_MPI_MSG_BYTES) {\n long long amt = _minl(numbytes - pos, MAX_MPI_MSG_BYTES);\n MPI_Recv(offset + pos, (int) amt, MPI_BYTE, r, 2714, comm, MPI_STATUS_IGNORE);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt m, int num) {\n for(int v = 0; v < m; v++) {\n long long h = X[v].h, w = X[v].w;\n for(long long x = 0; x < w; x++) {\n double mean = 0.0;\n for(long long y = 0; y < h; y++) mean += X[v].data[y * w + x];\n mean /= h;\n\n double std = 0.0;\n for(long long y = 0; y < h; y++) {\n double dev = X[v].data[y * w + x] - mean;\n std += dev * dev;\n }\n std /= h - 1;\n std = sqrt(std);\n if(std == 
0) std = EPS;\n\n for(long long y = 0; y < h; y++) X[v].data[y * w + x] = (X[v].data[y * w + x] - mean) / std;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "trix(PN + 1, local_ted.h);\n\n int s = displs[rank]; // Start pattern for this process\n for(long long y = 0; y < pattern_cnts[rank]; y++) {\n local_ted.data[y * num + s + y] = INFINITY;\n \n heap h = new_heap(local_ted.data + y * num, PN + 1);\n for(long long x = PN + 1; x < num; x++)\n if(local_ted.data[y * num + x] < heap_max(h))\n replace(&h, local_ted.data + y * num + x);\n \n sums[v * pattern_cnts[rank] + y] = block_sum_ptr(h.data + 1, PN, 0);\n double denominator = *h.data[0] * PN - sums[v * pattern_cnts[rank] + y] + EPS;\n\n for(long long i = 0; i < PN + 1; i++) {\n sprs_val val = {\n .i = h.data[i] - (local_ted.data + y * num),\n .value = ((*h.data[0] - *h.data[i]) / denominator) * (i > 0),\n };\n S0[v].data[y * (PN + 1) + i] = val;\n ed[v].data[y * (PN + 1) + i] = *h.data[i];\n }\n\n free_heap(h);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s\n memset(U.data, 0x00, (long long) num * (long long) pattern_cnts[rank] * sizeof(double));\n\n for(long long y = 0; y < pattern_cnts[rank]; y++) {\n double sum = 0.0;\n for(long long i = 0; i < PN + 1; i++)\n for(int v = 0; v < m; v++) {\n sprs_val val = S0[v].data[y * (PN + 1) + i];\n double t = val.value / m;\n U.data[y * num + val.i] += t;\n sum += t;\n }\n for(long long x = 0; x < num; x++) U.data[y * num + x] /= sum;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * sums) {\n for(long long v = 0; v < m; v++) {\n double weight = w.data[v] * 2.0;\n\n for(long long y = 0; y < pattern_cnts[rank]; y++) {\n double max = ed[v].data[(PN + 1) * y];\n double maxU = U.data[y * num + S0[v].data[(PN + 1) * y].i];\n\n double sumU = 0.0;\n for(long long i = 1; i < PN + 1; i++) {\n long long x = S0[v].data[y * (PN + 1) + i].i;\n sumU += U.data[y * num + x];\n }\n\n double numerator = max - weight * maxU;\n double denominator = PN * max - sums[v * pattern_cnts[rank] + y] + weight * (sumU - PN * maxU) + EPS;\n\n for(long long i = 0; i < PN + 1; i++) {\n long long x = S0[v].data[y * (PN + 1) + i].i;\n double r = (numerator - ed[v].data[(PN + 1) * y + i] + weight * U.data[y * num + x]) / denominator;\n S0[v].data[y * (PN + 1) + i].value = r * (r > 0.0);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".data, U.data, (long long) num * (long long) pattern_cnts[rank] * sizeof(double));\n \n for(long long y = 0; y < pattern_cnts[rank]; y++)\n for(long long i = 0; i < PN + 1; i++) {\n sprs_val val = S0[v].data[y * (PN + 1) + i];\n long long x = val.i;\n US.data[y * num + x] -= val.value;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "), dist.h, dist.data, local_dist.data, rank, numprocs, comm);\n if(!rank) 
free_matrix(dist);\n\n for(long long y = 0; y < pattern_cnts[rank]; y++) {\n int qw = 0;\n int * idx = malloc((long long) num * sizeof(int));\n\n #ifdef IS_LOCAL\n memset(idx, 0x00, (long long) num * sizeof(int));\n for(int v = 0; v < m; v++) {\n for(long long i = 0; i < PN + 1; i++) {\n sprs_val val = S0[v].data[y * (PN + 1) + i];\n long long x = val.i;\n qw -= idx[x];\n idx[x] |= (val.value > 0);\n qw += idx[x];\n }\n }\n #else\n memset(idx, 0x01, num * sizeof(int));\n qw = num;\n \n\n matrix q = new_matrix(qw, m);\n for(long long x = 0, i = 0; x < num; x++) {\n if(idx[x]) {\n q.data[i] = *lambda * local_dist.data[y * num + x] / (double) m * -0.5;\n i++;\n idx[x] = i;\n } \n }\n\n for(long long v = m - 1; v >= 0; v--)\n for(long long i = 0; i < qw; i++)\n q.data[v * qw + i] = q.data[i] / w.data[v];\n\n for(long long v = m - 1; v >= 0; v--) {\n for(long long i = 0; i < PN + 1; i++) {\n sprs_val val = S0[v].data[y * (PN + 1) + i];\n long long x = val.i;\n if(idx[x]) q.data[v * qw + idx[x] - 1] += val.value;\n }\n }\n\n q = update_u(q);\n for(long long x = 0, i = 0; x < num; x++) {\n if(idx[x]) {\n U.data[y * num + x] = q.data[i];\n i++;\n } else U.data[y * num + x] = 0.0;\n }\n\n free_matrix(q);\n free(idx);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/5uso/HiPGMC/src/gmc.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "C_STEP(\"End: symU\");\n bool * adj = malloc((long long) num * (long long) num * sizeof(bool));\n for(long long j = 0; j < num; j++)\n for(long long x = 0; x < j; x++)\n adj[j * num + x] = (U.data[j * num + x] != 0.0) || (U.data[x * num + j] != 0.0);\n\n // Final clustering. Find connected components on sU with Tarjan's algorithm\n GMC_STEP(\"End: final clustering\");\n int * y = malloc((long long) num * sizeof(int));\n int cluster_num = connected_comp(adj, y, num);\n\n // Cleanup\n GMC_STEP(\"End: cleanup\");\n free_matrix(F_old); free(adj);\n\n // Build output struct\n gmc_result result;\n result.U = U; result.S0 = S0; result.F = F; result.evs = evs; result.y = y; result.n = num; result.m = m;\n result.cluster_num = cluster_num; result.iterations = it + 1; result.lambda = lambda;\n return result;\n}\n\nvoid free_gmc_result(gmc_result r) {\n free_matrix(r.U);\n for(int i = 0; i < r.m; i++) free_sparse(r.S0[i]);\n free(r.S0);\n free_matrix(r.F);\n free_matrix(r.evs);\n free(r.y);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "arams.nposes);\n\n for(int p = 0; p < 6; p++){\n poses[p] = malloc(sizeof(float) * params.nposes);\nfor(int i = 0; i < params.nposes; i++){\n poses[p][i] = params.poses[p][i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for\n for(int i = 0; i < params.nposes; i++){\n poses[p][i] = params.poses[p][i];\n }\n }\n\nfor(int i = 0; i < params.nposes; i++){\n buffer[i] = 0.f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n }\n\n#pragma omp parallel for\n for(int i = 0; i < params.nposes; i++){\n buffer[i] = 0.f;\n }\n\nfor(int i = 0; i < params.natpro; i++){\n 
protein[i] = params.protein[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "omp parallel for\n for(int i = 0; i < params.natpro; i++){\n protein[i] = params.protein[i];\n }\n\nfor(int i = 0; i < params.natlig; i++){\n ligand[i] = params.ligand[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "a omp parallel for\n for(int i = 0; i < params.natlig; i++){\n ligand[i] = params.ligand[i];\n }\n\nfor(int i = 0; i < params.ntypes; i++){\n forcefield[i] = params.forcefield[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/openmp/bude.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i = 0; i < params.ntypes; i++){\n forcefield[i] = params.forcefield[i];\n }\n\n\n // warm up 1 iter\nfor (unsigned group = 0; group < (params.nposes/WGSIZE); group++)\n {\n fasten_main(params.natlig, params.natpro, protein, ligand,\n poses[0], poses[1], poses[2],\n poses[3], poses[4], poses[5],\n buffer, forcefield, group);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/UoB-HPC/miniBUDE/makedeck/main.cpp", "omp_pragma_line": "#pragma omp parallel for default(none) shared(ligand, protein, ffParams, poses, energies, totalPoses, completed, std::cout)", "context_chars": 100, "text": "chrono::high_resolution_clock::now();\n\tsize_t completed = 0;\n\tsize_t totalPoses = config.poseSize;\n\tfor (size_t pose = 0; pose < totalPoses; pose++) {\n\t\tbude::kernel::fasten_main(\n\t\t\t\tligand.first.size(), protein.first.size(),\n\t\t\t\tprotein.first, ligand.first,\n\t\t\t\tposes.tilt, poses.roll, poses.pan,\n\t\t\t\tposes.xTrans, poses.yTrans, poses.zTrans,\n\t\t\t\tenergies, ffParams, pose);\n\t\t#pragma omp critical\n\t\t{\n\t\t\tcompleted++;\n\t\t\tif (completed % 10 == 0) {\n\t\t\t\tauto pct = static_cast((static_cast(completed) / totalPoses) * 100.0);\n\t\t\t\tstd::cout << \"[\"\n\t\t\t\t\t\t<< std::string(pct, '|') << std::string(100 - pct, ' ')\n\t\t\t\t\t\t<< \"] (\" << totalPoses << \"/\" << completed << \") \" << pct << \"%\\r\" << std::flush;\n\t\t\t}\n\t\t};\n\t} #pragma omp parallel for default(none) shared(ligand, protein, ffParams, poses, energies, totalPoses, completed, std::cout)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mttkrp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t const * const restrict bv = B->vals + (r * B->I);\n\n /* stretch out columns of A and B */\n for(idx_t x=0; x < nnz; ++x) {\n scratch[x] = vals[x] * av[indA[x]] * bv[indB[x]];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/sort.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": "nnz;\n\n /* for 3/4D, we can use quicksort on only the leftover modes */\n if(tt->nmodes == 3) {\n for(idx_t i = 0; i < nslices; ++i) {\n p_tt_quicksort2(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]);\n for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {\n tt->ind[m][j] = i;\n }\n } #pragma omp parallel for schedule(dynamic)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/sort.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": "m_array[i + 1]; ++j) {\n tt->ind[m][j] = i;\n }\n }\n\n } else if(tt->nmodes == 4) {\n for(idx_t i = 0; i < nslices; ++i) {\n p_tt_quicksort3(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]);\n for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {\n tt->ind[m][j] = i;\n }\n } #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/sort.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": " memmove(cmplt, cmplt+1, (tt->nmodes - 1) * sizeof(*cmplt));\n cmplt[tt->nmodes-1] = saved;\n\n for(idx_t i = 0; i < nslices; ++i) {\n p_tt_quicksort(tt, cmplt, histogram_array[i], histogram_array[i + 1]);\n for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {\n tt->ind[m][j] = i;\n }\n } #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/matrix.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "N = B->J;\n idx_t const Na = A->J;\n\n /* tiled matrix multiplication */\n idx_t const TILE = 16;\n for(idx_t i=0; i < M; ++i) {\n for(idx_t jt=0; jt < N; jt += TILE) {\n for(idx_t kt=0; kt < Na; kt += TILE) {\n idx_t const JSTOP = SS_MIN(jt+TILE, N);\n for(idx_t j=jt; j < JSTOP; ++j) {\n val_t accum = 0;\n idx_t const KSTOP = SS_MIN(kt+TILE, Na);\n for(idx_t k=kt; k < KSTOP; ++k) {\n accum += av[k + (i*Na)] * bv[j + (k*N)];\n }\n cv[j + (i*N)] += accum;\n }\n }\n }\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/sptensor.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "t(hist, 0, tt->dims[mode] * sizeof(*hist));\n\n idx_t const * const restrict inds = tt->ind[mode];\n for(idx_t x=0; x < tt->nnz; ++x) {\n #pragma omp atomic\n ++hist[inds[x]];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "t read_count = SS_MIN(BUF_LEN, count - n);\n fread(ubuf, sizeof(*ubuf), read_count, fin);\n for(idx_t i=0; i < read_count; ++i) {\n buffer[n + i] = ubuf[i];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "t read_count = SS_MIN(BUF_LEN, count - n);\n fread(ubuf, sizeof(*ubuf), read_count, fin);\n for(idx_t i=0; i < read_count; ++i) {\n buffer[n + i] = ubuf[i];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/ftensor.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:nfibs)", "context_chars": 100, "text": " ttinds[nmodes-1][0];\n ft->vals[0] = tt->vals[0];\n\n /* count fibers in tt */\n idx_t nfibs = 0;\n for(idx_t n=1; n < nnz; ++n) {\n for(idx_t m=0; m < nmodes-1; ++m) {\n /* check for new fiber */\n if(ttinds[m][n] != ttinds[m][n-1]) {\n ++nfibs;\n break;\n }\n }\n ft->inds[n] = 
ttinds[nmodes-1][n];\n ft->vals[n] = tt->vals[n];\n } #pragma omp parallel for reduction(+:nfibs)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/csf.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ices = csf->pt[tile_id].nfibs[0];\n idx_t * weights = splatt_malloc(nslices * sizeof(*weights));\n\n for(idx_t i=0; i < nslices; ++i) {\n weights[i] = p_csf_count_nnz(csf->pt[tile_id].fptr, csf->nmodes, 0, i);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/csf.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " idx_t const ntiles = csf->ntiles;\n idx_t * weights = splatt_malloc(ntiles * sizeof(*weights));\n\n for(idx_t i=0; i < ntiles; ++i) {\n weights[i] = csf->pt[i].nfibs[nmodes-1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/graph.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "er */\n case VTX_WT_FIB_NNZ:\n hg->vwts = (idx_t *) splatt_malloc(hg->nvtxs * sizeof(idx_t));\n for(idx_t v=0; v < hg->nvtxs; ++v) {\n hg->vwts[v] = ft->fptr[v+1] - ft->fptr[v];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_cpd.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "al_t * const restrict gmatv = globalmat->vals;\n\n /* copy my partial products into the sendbuf */\n for(idx_t s=0; s < rinfo->nlocal2nbr[m]; ++s) {\n idx_t const row = local2nbr_inds[s];\n for(idx_t f=0; f < nfactors; ++f) {\n local2nbr_buf[f + (s*nfactors)] = matv[f + (row*nfactors)];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": " p_rearrange_medium(\n sptensor_t * const ttbuf,\n idx_t * * ssizes,\n rank_info * const rinfo)\n{\n for(idx_t m=0; m < ttbuf->nmodes; ++m) {\n p_find_layer_boundaries(ssizes, m, rinfo);\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "o);\n }\n\n /* create partitioning */\n int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts));\n\n for(idx_t n=0; n < ttbuf->nnz; ++n) {\n parts[n] = mpi_determine_med_owner(ttbuf, n, rinfo);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "ssizes, rinfo);\n\n /* now map tensor indices to local (layer) coordinates and fill in dims */\n for(idx_t m=0; m < ttbuf->nmodes; ++m) {\n tt->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m];\n for(idx_t n=0; n < tt->nnz; ++n) {\n assert(tt->ind[m][n] >= rinfo->layer_starts[m]);\n assert(tt->ind[m][n] < rinfo->layer_ends[m]);\n tt->ind[m][n] -= rinfo->layer_starts[m];\n }\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", 
"omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "ry_file(fin, comm);\n break;\n }\n\n if(rank == 0) {\n fclose(fin);\n }\n\n /* set dims info */\n for(idx_t m=0; m < tt->nmodes; ++m) {\n idx_t const * const inds = tt->ind[m];\n idx_t dim = 1 +inds[0];\n for(idx_t n=1; n < tt->nnz; ++n) {\n dim = SS_MAX(dim, 1 + inds[n]);\n }\n tt->dims[m] = dim;\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "tor = mat_rand(rinfo->global_dims[mode], nfactors);\n\n /* copy root's own matrix to output */\n for(idx_t i=0; i < localdim; ++i) {\n idx_t const gi = rinfo->mat_start[mode] + perm->iperms[mode][i];\n for(idx_t f=0; f < nfactors; ++f) {\n mymat->vals[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)];\n }\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ShadenSmith/splatt/src/mpi/mpi_io.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ecv(loc_perm, nrows, SPLATT_MPI_IDX, p, 2, rinfo->comm_3d, &status);\n\n /* fill buffer */\n for(idx_t i=0; i < nrows; ++i) {\n idx_t const gi = layerstart + loc_perm[i];\n for(idx_t f=0; f < nfactors; ++f) {\n vbuf[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)];\n }\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/adammaj1/Mandelbrot-set-with-blended-gradients/src/d.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(ix,iy, i, Cx, Cy) shared(A, ixMax , iyMax) ", "context_chars": 100, "text": " coordinate \n\t\n\tfprintf(stderr, \"compute image CheckOrientation\\n\");\n \t// for all pixels of image \n\tfor (iy = iyMin; iy <= iyMax; ++iy){\n \t\tfprintf (stderr, \" %d from %d \\r\", iy, iyMax);\t//info \n \t\tfor (ix = ixMin; ix <= ixMax; ++ix){\n \t\t\t// from screen to world coordinate \n \t\t\tCy = GiveCy(iy);\n \t\t\tCx = GiveCx(ix);\n\t \t\ti = Give_i(ix, iy); /* compute index of 1D array from indices of 2D array */\n\t \t\tif (Cx>0 && Cy>0) A[i]=255-A[i]; // check the orientation of Z-plane by marking first quadrant */\n \t\t}\n \t} #pragma omp parallel for schedule(dynamic) private(ix,iy, i, Cx, Cy) shared(A, ixMax , iyMax) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/adammaj1/Mandelbrot-set-with-blended-gradients/src/d.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(ix,iy) shared(A, ixMax , iyMax)", "context_chars": 100, "text": " int ix, iy;\t\t// pixel coordinate \n\n \t//printf(\"compute image \\n\");\n \t// for all pixels of image \n\tfor (iy = iyMin; iy <= iyMax; ++iy){ \n \t\tfprintf (stderr, \" %d from %d \\r\", iy, iyMax);\t//info \n \t\tfor (ix = ixMin; ix <= ixMax; ++ix)\n \t\t\tComputePoint_dData(A, RepresentationFunction, ix, iy);\t// \n } #pragma omp parallel for schedule(dynamic) private(ix,iy) shared(A, ixMax , iyMax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/adammaj1/Mandelbrot-set-with-blended-gradients/src/d.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(i) shared( D, C, iSize)", "context_chars": 100, "text": "rr, \"\\nFill_rgbData_from_dData\\n\");\n \t//printf(\"compute image \\n\");\n \t// for all pixels of image \n\tfor (i = 0; i < iSize; ++i){\n 
\t\t//fprintf (stderr, \"rgb %d from %d \\r\", i, iSize);\t//info \n \t\tComputeAndSaveColor(i, D, RepresentationFunction, Gradient, C);\t// \n } #pragma omp parallel for schedule(dynamic) private(i) shared( D, C, iSize)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/adammaj1/Mandelbrot-set-with-blended-gradients/src/d.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) private(i) shared( C1, C2, C, iSize)", "context_chars": 100, "text": ", \"\\nFill_rgbData_from_2_dData\\n\");\n \t//printf(\"compute image \\n\");\n \t// for all pixels of image \n\tfor (i = 0; i < iSize; ++i){\n \t\t\n \t\tComputeAndSaveBlendColor( C1, C2, Blend, i, C);\n } #pragma omp parallel for schedule(dynamic) private(i) shared( C1, C2, C, iSize)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/trrt-good/NeuralNetworks.c/NeuralNetCPU/neural_net_legacy.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dense(nnet->weights[0], npl[1], npl[0], inputs, nnet->biases[0], activations[0]);\n int i;\n // for (i = 1; i < LAYERS; i++)\n {\n nnet_layer_function_dense(nnet->weights[i], npl[i + 1], npl[i], activations[i - 1], nnet->biases[i], activations[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/trrt-good/NeuralNetworks.c/NeuralNetCPU/neural_net_legacy.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " training_set->num_examples));\n for (batch = 0; batch < parallel_batches; batch++)\n {\nfor (thread = 0; thread < MAX_THREADS; thread++)\n {\n for (int nthExample = (batch * MAX_THREADS + thread) * examples_per_thread; nthExample < (batch * MAX_THREADS + thread + 1) * examples_per_thread; nthExample++)\n {\n nnet_adjust_gradients(nnet, activations[thread], weight_gradients[thread], bias_gradients[thread], weight_product[thread], weight_product_buffer[thread], training_set->inputs[nthExample], training_set->outputs[nthExample]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/trrt-good/NeuralNetworks.c/NeuralNetCPU/neural_net.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t->num_examples));\n for (batch = 0; batch < parallel_batches; batch++)\n {\n for (thread = 0; thread < MAX_THREADS; thread++)\n {\n for (int nthExample = (batch * MAX_THREADS + thread) * examples_per_thread; nthExample < (batch * MAX_THREADS + thread + 1) * examples_per_thread; nthExample++)\n {\n nnet_backprop(nnet, activations[thread], weight_gradients[thread], bias_gradients[thread], chain_rule_vector[thread], math_buffer[thread], training_set->inputs[nthExample], training_set->outputs[nthExample]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/10_integrate-pi/solution/pi-integration.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:local_pi)", "context_chars": 100, "text": "ntf(\"rank %d: start=%ld, end=%ld\\n\", rank, start, end);\n\n double local_pi = 0.0;\n long int i;\nfor (i = start; i < end; i++) {\n double x = delta_x * ((double)(i) + 0.5);\n local_pi += 1.0 / (1.0 + x * x);\n } #pragma omp parallel for reduction(+:local_pi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/01_threading-funneled/threading-funneled.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, 
"text": "tribute each\n * iteration to a different thread. */\n /* int local_work[] = FIXME; */\nfor (int k = 0; k != 2; k = k + 1)\n {\n /* compute_row(FIXME, working_data_set, next_working_data_set); */\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/01_threading-funneled/threading-funneled.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ute each\n * iteration to a different thread. */\n /* int non_local_work[] = FIXME; */\nfor (int k = 0; k != 2; k = k + 1)\n {\n /* compute_row(FIXME, working_data_set, next_working_data_set); */\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/01_threading-funneled/solution/threading-funneled.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "l distribute each\n * iteration to a different thread. */\n int local_work[] = {2, 3};\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(local_work[k], working_data_set, next_working_data_set);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/01_threading-funneled/solution/threading-funneled.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "stribute each\n * iteration to a different thread. */\n int non_local_work[] = {1, 4};\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(non_local_work[k], working_data_set, next_working_data_set);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/02_threading-multiple/threading-multiple.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "cal computation. OpenMP will distribute each\n * iteration to a different thread. */\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(/* FIXME */);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/02_threading-multiple/threading-multiple.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * iteration to a different thread. */\n int non_local_work[] = /* FIXME */;\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(/* FIXME */);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/02_threading-multiple/solution/threading-multiple.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "cal computation. OpenMP will distribute each\n * iteration to a different thread. */\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(local_work[k], working_data_set, next_working_data_set);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ENCCS/intermediate-mpi/content/code/day-4/02_threading-multiple/solution/threading-multiple.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * iteration to a different thread. 
*/\n int non_local_work[] = {1, 4};\nfor (int k = 0; k != 2; k = k + 1)\n {\n compute_row(non_local_work[k], working_data_set, next_working_data_set);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "r(size_t i=0; i < particleVec.size(); i++)\n {\n double densityVal = 0;\n for(size_t j = 0; j < particleVec[i].neighbours.size(); j++)\n {\n if(i == particleVec[i].neighbours[j])continue;\n const Helper::Point3D diffParticleNeighbour = particleVec[i].position - particleVec[particleVec[i].neighbours[j]].position;\n\n if(Config::SupportRadius - diffParticleNeighbour.calcNorm() > DBL_EPSILON)\n {\n densityVal += particleVec[particleVec[i].neighbours[j]].mass * PolyKernel(diffParticleNeighbour);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n void IISPHForces::compute_gravity_force(ParticleVec& particleVec)\n {\n for(auto& particle: particleVec)\n {\n particle.fGravity = Config::GravitationalAcceleration * particle.mass;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n void IISPHForces::compute_advection_forces(ParticleVec& particleVec)\n {\n for(auto& particle: particleVec)\n {\n particle.fAdvection = particle.fGravity;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n void IISPHForces::compute_predicted_velocity(ParticleVec& particleVec)\n {\n for(auto& particle: particleVec)\n {\n particle.predicted_velocity = particle.velocity + Config::timestep*particle.fAdvection/particle.mass;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "cle.mass;\n }\n }\n void IISPHForces::compute_DII(ParticleVec& particleVec)\n {\n for(size_t i=0; i < particleVec.size(); i++)\n {\n Helper::Point3D finalDII = Helper::Point3D(0,0,0);\n #pragma omp parallel for \n for(size_t j=0; j< particleVec[i].neighbours.size(); j++)\n {\n if(i == particleVec[i].neighbours[j])continue;\n const Helper::Point3D diffParticleNeighbour = particleVec[i].position - particleVec[particleVec[i].neighbours[j]].position;\n \n if(Config::SupportRadius - diffParticleNeighbour.calcNorm() > DBL_EPSILON && diffParticleNeighbour.calcNorm()!=0)\n {\n finalDII += -1 * Config::timestep * Config::timestep * particleVec[particleVec[i].neighbours[j]].mass * PolyKernelGradient(diffParticleNeighbour)/(particleVec[i].density*particleVec[i].density); \n }\n }\n if(std::isnan(finalDII.x) || std::isnan(finalDII.y) || std::isnan(finalDII.z)) std::cout << \"computeDII\" << i << std::endl;\n particleVec[i].dii = finalDII;\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "c.size(); i++)\n {\n Helper::Point3D finalDII = Helper::Point3D(0,0,0);\n for(size_t j=0; j< particleVec[i].neighbours.size(); j++)\n {\n if(i == particleVec[i].neighbours[j])continue;\n const Helper::Point3D diffParticleNeighbour = particleVec[i].position - particleVec[particleVec[i].neighbours[j]].position;\n \n if(Config::SupportRadius - diffParticleNeighbour.calcNorm() > DBL_EPSILON && diffParticleNeighbour.calcNorm()!=0)\n {\n finalDII += -1 * Config::timestep * Config::timestep * particleVec[particleVec[i].neighbours[j]].mass * PolyKernelGradient(diffParticleNeighbour)/(particleVec[i].density*particleVec[i].density); \n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "Point3D diffParticleNeighbour = pV[i].position - pV[pV[i].neighbours[j]].position;\n\n if(Config::SupportRadius - diffParticleNeighbour.calcNorm() > DBL_EPSILON && diffParticleNeighbour.calcNorm()!=0)\n {\n pV[i].predicted_density += Config::timestep* pV[pV[i].neighbours[j]].mass * (pV[i].predicted_velocity - pV[pV[i].neighbours[j]].predicted_velocity).dot(PolyKernelGradient(diffParticleNeighbour)); \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "Point3D diffParticleNeighbour = pV[i].position - pV[pV[i].neighbours[j]].position;\n\n if(Config::SupportRadius - diffParticleNeighbour.calcNorm() > DBL_EPSILON && diffParticleNeighbour.calcNorm()!=0)\n {\n Helper::Point3D dji = -1 * Config::timestep * Config::timestep *( pV[pV[i].neighbours[j]].mass/pow(pV[pV[i].neighbours[j]].density,2)) * PolyKernelGradient(-1 * diffParticleNeighbour);\n \n aiiSUM += pV[pV[i].neighbours[j]].mass *(pV[i].dii - dji).dot(PolyKernelGradient(diffParticleNeighbour));\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "cleVec& particleVec, int i)\n {\n Helper::Point3D sumDIJ = Helper::Point3D(0,0,0);\n\n for(size_t j=0; j< particleVec[i].neighbours.size(); j++)\n {\n if(i == particleVec[i].neighbours[j])continue;\n particleVec[particleVec[i].neighbours[j]].prev_iterate_pressure = particleVec[particleVec[i].neighbours[j]].curr_iterate_pressure;\n\n const Helper::Point3D diffParticleNeighbour = particleVec[i].position - particleVec[particleVec[i].neighbours[j]].position;\n if(!diffParticleNeighbour.calcNorm())continue;\n sumDIJ += -1 * particleVec[particleVec[i].neighbours[j]].mass * particleVec[particleVec[i].neighbours[j]].prev_iterate_pressure * (PolyKernelGradient(diffParticleNeighbour))/(pow(particleVec[particleVec[i].neighbours[j]].density,2));\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ize(); i++)\n {\n 
double corr_density=0.0;\n \n for(size_t j=0; j< pV[i].neighbours.size(); j++)\n {\n if(i==pV[i].neighbours[j])continue;\n const Helper::Point3D diffParticleNeighbour = pV[pV[i].neighbours[j]].position - pV[i].position; \n if(!diffParticleNeighbour.calcNorm())continue;\n Helper::Point3D dji = -1 * Config::timestep * Config::timestep * (pV[i].mass/(pV[i].density*pV[i].density))*PolyKernelGradient(diffParticleNeighbour);\n \n corr_density += pV[pV[i].neighbours[j]].mass * (pV[i].sigma_dij - (pV[pV[i].neighbours[j]].dii * pV[pV[i].neighbours[j]].prev_iterate_pressure) - (pV[pV[i].neighbours[j]].sigma_dij - dji*pV[i].pressure)).dot(PolyKernelGradient(-1*diffParticleNeighbour));\n\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble IISPHForces::ErrorDensity(ParticleVec& particleVec)\n {\n double avg_density=0;\n for(size_t i=0; i< particleVec.size(); i++)\n {\n avg_density += particleVec[i].corrected_density ;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ce(ParticleVec& pV, int i)\n {\n Helper::Point3D forceSum = Helper::Point3D(0,0,0);\n for(size_t j=0; j < pV[i].neighbours.size(); j++)\n {\n if(i == pV[i].neighbours[j])continue;\n const Helper::Point3D diffParticleNeighbour = pV[i].position - pV[pV[i].neighbours[j]].position;\n if(!diffParticleNeighbour.calcNorm())continue;\n forceSum += pV[pV[i].neighbours[j]].mass * ((pV[i].pressure/pow(pV[i].density,2)) + (pV[pV[i].neighbours[j]].pressure/pow(pV[pV[i].neighbours[j]].density,2))) * PolyKernelGradient(diffParticleNeighbour);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/agam-kashyap/Fluid-Simulation-Rendering/Simulation/src/iisph-forces.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * -1 * pV[i].mass;\n }\n void IISPHForces::integration(ParticleVec& particleVec)\n {\n for(size_t i=0; i < particleVec.size(); i++)\n {\n if(std::isnan(particleVec[i].velocity.x) || std::isnan(particleVec[i].velocity.y) || std::isnan(particleVec[i].velocity.z))\n {\n particleVec[i].predicted_velocity = Helper::Point3D(1,1,1);\n }\n particleVec[i].velocity = particleVec[i].predicted_velocity + Config::timestep * compute_pressure_force(particleVec, i)/particleVec[i].mass ;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rintf (\"Number of Threads counted = %i\\n\",k);\n#endif\n\n /* Get initial value for system clock. 
*/\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rity appears to be \"\n\t \"less than one microsecond.\\n\");\n\tquantum = 1;\n }\n\n t = mysecond();\nfor (j = 0; j < STREAM_ARRAY_SIZE; j++)\n\t\ta[j] = 2.0E0 * a[j];\n t = 1.0E6 * (mysecond() - t);\n\n printf(\"Each test below will take on the order\"\n\t\" of %d microseconds.\\n\", (int) t );\n printf(\" (= %d clock ticks)\\n\", (int) (t/quantum) );\n printf(\"Increase the size of the arrays if this shows that\\n\");\n printf(\"you are not getting at least 20 clock ticks per test.\\n\");\n\n printf(HLINE);\n\n printf(\"WARNING -- The above is only a rough guideline.\\n\");\n printf(\"For best results, please be sure you know the\\n\");\n printf(\"precision of your system timer.\\n\");\n printf(HLINE);\n\n /*\t--- MAIN LOOP --- repeat test cases NTIMES times --- */\n\n scalar = 3.0;\n for (k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (k=0; kfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[0][k];\n\t\n\ttimes[1][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Scale(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "second() - times[1][k];\n\t\n\ttimes[2][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Add();\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[2][k];\n\t\n\ttimes[3][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Triad(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#ifdef TUNED\n/* stubs for \"tuned\" versions of the kernels */\nvoid tuned_STREAM_Copy()\n{\n\tssize_t j;\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "AY_SIZE; j++)\n c[j] = a[j];\n}\n\nvoid tuned_STREAM_Scale(STREAM_TYPE scalar)\n{\n\tssize_t j;\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/openmpbench/syncbench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "Y\n\tstatic int n = 1;\n\tXRayAnnotate(\"n = %i\", n);\n\tn++;\n#endif\n for (j = 0; j < 
innerreps; j++) {\nfor (i = 0; i < nthreads; i++) {\n\t delay(delaylength);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/libhermit/usr/openmpbench/syncbench.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static,1)", "context_chars": 100, "text": "ck);\n\t delay(delaylength);\n\t omp_unset_lock(&lock);\n\t}\n }\n}\n\nvoid testorder() {\n int j;\nfor (j = 0; j < (int)innerreps; j++) {\n#pragma omp ordered\n\tdelay(delaylength);\n } #pragma omp parallel for ordered schedule (static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rintf (\"Number of Threads counted = %i\\n\",k);\n#endif\n\n /* Get initial value for system clock. */\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rity appears to be \"\n\t \"less than one microsecond.\\n\");\n\tquantum = 1;\n }\n\n t = mysecond();\nfor (j = 0; j < STREAM_ARRAY_SIZE; j++)\n\t\ta[j] = 2.0E0 * a[j];\n t = 1.0E6 * (mysecond() - t);\n\n printf(\"Each test below will take on the order\"\n\t\" of %d microseconds.\\n\", (int) t );\n printf(\" (= %d clock ticks)\\n\", (int) (t/quantum) );\n printf(\"Increase the size of the arrays if this shows that\\n\");\n printf(\"you are not getting at least 20 clock ticks per test.\\n\");\n\n printf(HLINE);\n\n printf(\"WARNING -- The above is only a rough guideline.\\n\");\n printf(\"For best results, please be sure you know the\\n\");\n printf(\"precision of your system timer.\\n\");\n printf(HLINE);\n\n /*\t--- MAIN LOOP --- repeat test cases NTIMES times --- */\n\n scalar = 3.0;\n for (k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (k=0; kfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[0][k];\n\t\n\ttimes[1][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Scale(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "second() - times[1][k];\n\t\n\ttimes[2][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Add();\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[2][k];\n\t\n\ttimes[3][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Triad(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#ifdef TUNED\n/* stubs for \"tuned\" versions of the kernels */\nvoid 
tuned_STREAM_Copy()\n{\n\tssize_t j;\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "AY_SIZE; j++)\n c[j] = a[j];\n}\n\nvoid tuned_STREAM_Scale(STREAM_TYPE scalar)\n{\n\tssize_t j;\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/benchmarks/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (j=0; jfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/openmpbench/syncbench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "Y\n\tstatic int n = 1;\n\tXRayAnnotate(\"n = %i\", n);\n\tn++;\n#endif\n for (j = 0; j < innerreps; j++) {\nfor (i = 0; i < nthreads; i++) {\n\t delay(delaylength);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hermit-os/hermit-playground/usr/openmpbench/syncbench.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static,1)", "context_chars": 100, "text": "ck);\n\t delay(delaylength);\n\t omp_unset_lock(&lock);\n\t}\n }\n}\n\nvoid testorder() {\n int j;\nfor (j = 0; j < (int)innerreps; j++) {\n#pragma omp ordered\n\tdelay(delaylength);\n } #pragma omp parallel for ordered schedule (static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/RIKEN-SysSoft/mckernel/test/uti/CT30.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \"Error: pthread_create: %s\\n\", strerror(errno));\n\t\t\texit(1);\n\t\t}\n\t}\n\n\tpthread_barrier_wait(&bar);\n\nfor (i = 0; i < omp_get_num_threads(); i++) {\n\t\tprintf(\"[INFO] thread_num=%d,tid=%d\\n\", i, syscall(SYS_gettid));\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/RIKEN-SysSoft/mckernel/test/uti/mpi/011.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "memset(recvv[i], 0, szentry);\n\t}\n\n\tprintf(\"after memset\\n\");\n\n\tprint_cpu_last_executed_on(\"main\");\n\nfor (i = 0; i < omp_get_num_threads(); i++) {\n\t\tprintf(\"thread_num=%d,tid=%d\\n\", i, syscall(SYS_gettid));\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/oshmpi/tests/stream_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NTHREADS)", "context_chars": 100, "text": "r (j = 0; j < STREAM_ARRAY_SIZE; j += blocksize)\n\t{\n\t if (next_p == count_p)\n\t {\n#ifdef _OPENMP\n#ifdef __INTEL_COMPILER\n#pragma vector aligned\n#pragma ivdep\n\n\t for (i = j; i < (j + blocksize); i++)\n\t\t{\n\t\t a[i] = 1.0;\n\t\t b[i] = 2.0;\n\t\t c[i] = 0.0;\n\t\t a[i] = 2.0E0 * a[i];\n\n\t\t} #pragma omp parallel for num_threads(NTHREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/oshmpi/tests/stream_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NTHREADS)", "context_chars": 100, "text": " (j = 0; j < STREAM_ARRAY_SIZE; j += blocksize)\n\t{\n\t if (next_p == count_p)\n\t {\n\n#ifdef _OPENMP\n#ifdef __INTEL_COMPILER\n#pragma vector aligned\n#pragma ivdep\n\n\t for (i = j; i < (j + blocksize); i++)\n\t\t{\n\t\t c[i] = a[i];\n\t\t} #pragma omp parallel for num_threads(NTHREADS)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/oshmpi/tests/stream_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NTHREADS)", "context_chars": 100, "text": "r (j = 0; j < STREAM_ARRAY_SIZE; j += blocksize)\n\t{\n\t if (next_p == count_p)\n\t {\n#ifdef _OPENMP\n#ifdef __INTEL_COMPILER\n#pragma vector aligned\n#pragma ivdep\n\n\t for (i = j; i < (j + blocksize); i++)\n\t\t{\n\t\t b[i] = scalar * c[i];\n\t\t} #pragma omp parallel for num_threads(NTHREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/oshmpi/tests/stream_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NTHREADS)", "context_chars": 100, "text": "r (j = 0; j < STREAM_ARRAY_SIZE; j += blocksize)\n\t{\n\t if (next_p == count_p)\n\t {\n#ifdef _OPENMP\n#ifdef __INTEL_COMPILER\n#pragma vector aligned\n#pragma ivdep\n\n\t for (i = j; i < (j + blocksize); i++)\n\t\t{\n\t\t c[i] = a[i] + b[i];\n\t\t} #pragma omp parallel for num_threads(NTHREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/oshmpi/tests/stream_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NTHREADS)", "context_chars": 100, "text": "r (j = 0; j < STREAM_ARRAY_SIZE; j += blocksize)\n\t{\n\t if (next_p == count_p)\n\t {\n#ifdef _OPENMP\n#ifdef __INTEL_COMPILER\n#pragma vector aligned\n#pragma ivdep\n\n\t for (i = j; i < (j + blocksize); i++)\n\t\t{\n\t\t a[i] = b[i] + scalar * c[i];\n\t\t} #pragma omp parallel for num_threads(NTHREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/simd/simd-memtest/util.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:errors)", "context_chars": 100, "text": "re_doubles(size_t n, const double * RESTRICT x, const double * RESTRICT y)\n{\n size_t errors = 0;\nfor (size_t i=0; i #pragma omp parallel for reduction(+:errors)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/simd/simd-memtest/util.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:errors)", "context_chars": 100, "text": "ize_t n, const double * RESTRICT x, const double * RESTRICT y, int stride)\n{\n size_t errors = 0;\nfor (size_t i=0; i #pragma omp parallel for reduction(+:errors)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/simd/simd-memtest/util.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:errors)", "context_chars": 100, "text": "st double * RESTRICT x, const double * RESTRICT y, int stride, double val)\n{\n size_t errors = 0;\nfor (size_t i=0; i #pragma omp parallel for reduction(+:errors)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/simd/simd-memtest/util.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " errors++;\n }\n }\n return errors;\n}\n\nvoid init_doubles(size_t n, double * RESTRICT x)\n{\nfor (size_t i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/simd/simd-memtest/util.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n x[i] = (double)i;\n }\n}\n\nvoid set_doubles(size_t n, double value, double * RESTRICT x)\n{\nfor (size_t i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for 
if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": "eof(double) );\n B = safemalloc( N * sizeof(double) );\n\n int k = 0;\n#ifdef OMP\nfor ( int i=0 ; iOMP_MIN_SIZE)\n\n for ( int i=0 ; iOMP_MIN_SIZE)\n\n for ( int i=0 ; i #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": " for ( int j=0 ; jfor ( int i=0 ; iOMP_MIN_SIZE)\n\n for ( int i=0 ; i #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": "\n t0 = getticks();\n for ( int t=0 ; tfor ( int i=0 ; iOMP_MIN_SIZE)\n\n for ( int i=0 ; i #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": "\n t0 = getticks();\n for ( int t=0 ; tfor ( int i=0 ; iOMP_MIN_SIZE)\n\n for ( int j=0 ; j #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": "\n t0 = getticks();\n for ( int t=0 ; tfor ( int j=0 ; jOMP_MIN_SIZE)\n\n //#pragma unroll(4)\n #pragma unroll_and_jam\n for ( int j=0 ; j #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/tuning/transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for if(n>OMP_MIN_SIZE)", "context_chars": 100, "text": " s1 loads */\n t0 = getticks();\n for ( int t=0 ; t//#pragma unroll(4)\n #pragma unroll_and_jam\n for ( int j=0 ; jOMP_MIN_SIZE)\n\n {\n int n4 = n-(n%4); /* divisible-by-4 part */\n#ifdef OMP\n#pragma omp for private(i,j,n4)\n\n for ( int j=0 ; j #pragma omp parallel for if(n>OMP_MIN_SIZE)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/distribute.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " */\n\nint main(int argc, char* argv[])\n{\n#ifdef DISTRIBUTE\n#pragma omp distribute parallel for\n#else\nfor (int i=0; i<100; i++) {\n printf(\"tid=%d\\n\", omp_get_thread_num());\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "A!=NULL);\n assert(B!=NULL);\n assert(C!=NULL);\n\n double t0, t1;\n\n#pragma omp parallel\n{\n for (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (int i=0; ifor (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (int i=0; ifor (int i=0; i #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "for (int k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " C[i*n+j] += A[i*n+k] * B[k*n+j];\n\n t1 = omp_get_wtime();\n}\n\n double x = 0.0;\n for (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/stl.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "uble> A(1000);\n\n#if 0\n #pragma omp workshare\n std::iota(A.begin(), A.end(), 0.0);\n#endif\n\n std::for_each( std::begin(A), std::end(A), [&] (double x) { std::cout << x; } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_4.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "STRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #pragma omp parallel for\n#else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n #pragma omp parallel for simd\n#elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_4.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#ifdef __cilk\n _Cilk_for(int i = 0; i < n; i++)\n#else\n #warning No Cilk support. 
Using sequential for loop.\n for(int i = 0; i < n; i++)\n\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_4.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rom:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#ifdef __cilk\n _Cilk_for(int i = 0; i < n; i++)\n#else\n #warning No Cilk support. Using sequential for loop.\n for(int i = 0; i < n; i++)\n\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_2.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "STRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #pragma omp parallel for\n#else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n #pragma omp parallel for simd\n#elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_2.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nint main(int argc, char * argv[])\n{\n int n = (argc > 1 ) ? 
atoi(argv[1]) : 1000;\n float * x = calloc(n,sizeof(float)); assert(x !=NULL);\n float * y = calloc(n,sizeof(float)); assert(y !=NULL);\n float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL);\n float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL);\n float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL);\n\n#if 0 && defined(_OPENMP) && (_OPENMP >= 201307)\n int nthrd = omp_get_max_threads();\n int ndevs = omp_get_num_devices();\n printf(\"OpenMP threads = %d devices = %d\\n\", nthrd, ndevs);\n\n\n for (int i=0; i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rom:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nint main(int argc, char * argv[])\n{\n int n = (argc > 1 ) ? atoi(argv[1]) : 1000;\n float * x = calloc(n,sizeof(float)); assert(x !=NULL);\n float * y = calloc(n,sizeof(float)); assert(y !=NULL);\n float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL);\n float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL);\n float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL);\n\n#if 0 && defined(_OPENMP) && (_OPENMP >= 201307)\n int nthrd = omp_get_max_threads();\n int ndevs = omp_get_num_devices();\n printf(\"OpenMP threads = %d devices = %d\\n\", nthrd, ndevs);\n\n\n for (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_memory_model.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " #pragma omp parallel\n #pragma omp master\n {\n #pragma omp task\n {\n for (int i=0; i<100000; i++) {\n #pragma omp atomic update\n x++;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_memory_model.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ragma omp task\n {\n #pragma omp target map(tofrom:x)\n {\n for (int i=0; i<100000; i++) {\n #pragma omp atomic update\n x++;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_syscall.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "sched.h>\n\n#ifdef _OPENMP\n#include \n#endif\n\n#if 1\nvoid foo(void)\n{\n #pragma omp target\n for (int i=0; i<1; i++)\n sched_yield();\n}\n\n\n#if 0\nvoid foo(void)\n{\n #pragma omp target\n #pragma omp parallel for\n for (int i=0; i<1; i++)\n printf(\"Bob W is great.\\n\");\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_syscall.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (int i=0; i<1; i++)\n sched_yield();\n}\n#endif\n\n#if 0\nvoid foo(void)\n{\n #pragma omp target\n for (int i=0; i<1; i++)\n printf(\"Bob W is great.\\n\");\n}\n\n\n#if 0\nvoid foo(void)\n{\n #pragma omp target\n #pragma omp parallel for\n for (int i=0; i<1; i++)\n puts(\"Rolf R is great\\n\");\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_syscall.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "; i++)\n printf(\"Bob W is 
great.\\n\");\n}\n#endif\n\n#if 0\nvoid foo(void)\n{\n #pragma omp target\n for (int i=0; i<1; i++)\n puts(\"Rolf R is great\\n\");\n}\n\n\nint main(int argc, char * argv[])\n{\n foo();\n\n printf(\"Success\\n\");\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "STRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #pragma omp parallel for\n#else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n# if defined(__INTEL_COMPILER) && defined(__INTEL_OFFLOAD)\n #pragma omp parallel for simd\n# else\n #pragma omp teams distribute parallel for simd\n# endif\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n #pragma omp parallel for simd\n#elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n# if defined(__INTEL_COMPILER) && defined(__INTEL_OFFLOAD)\n #pragma omp parallel for simd\n# else\n #pragma omp teams distribute parallel for simd\n# endif\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "to:n,a[0:n],b[0:n]) map(from:c[0:n])\n# if defined(__INTEL_COMPILER) && defined(__INTEL_OFFLOAD)\n # else\n #pragma omp teams distribute parallel for simd\n# endif\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#ifdef __cilk\n _Cilk_for(int i = 0; i < n; i++)\n#else\n #warning No Cilk support. Using sequential for loop.\n for(int i = 0; i < n; i++)\n\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "mp teams distribute parallel for simd\n# endif\n#else\n #warning No OpenMP target/simd support!\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#ifdef __cilk\n _Cilk_for(int i = 0; i < n; i++)\n#else\n #warning No Cilk support. 
Using sequential for loop.\n for(int i = 0; i < n; i++)\n\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_1.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "STRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n #elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #pragma omp parallel for\n#else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n #pragma omp parallel for simd\n#elif defined(_OPENMP)\n #warning No OpenMP simd support!\n #else\n #warning No OpenMP support!\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nvoid vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)\n{\n#if defined(_OPENMP) && (_OPENMP >= 201307)\n //#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_1.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "a[0:n],b[0:n]) map(from:c[0:n])\n #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])\n #else\n #warning No OpenMP target/simd support!\n #pragma omp parallel for\n\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nint main(int argc, char * argv[])\n{\n int n = (argc > 1 ) ? atoi(argv[1]) : 1000;\n float * x = calloc(n,sizeof(float)); assert(x !=NULL);\n float * y = calloc(n,sizeof(float)); assert(y !=NULL);\n float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL);\n float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL);\n float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL);\n\n for (int i=0; i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/openmp/offload/test_vadd_1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rom:c[0:n])\n #pragma omp parallel for simd\n#else\n #warning No OpenMP target/simd support!\n for(int i = 0; i < n; i++)\n c[i] = a[i] + b[i];\n}\n\nint main(int argc, char * argv[])\n{\n int n = (argc > 1 ) ? 
atoi(argv[1]) : 1000;\n float * x = calloc(n,sizeof(float)); assert(x !=NULL);\n float * y = calloc(n,sizeof(float)); assert(y !=NULL);\n float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL);\n float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL);\n float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL);\n\n for (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/jeffhammond/HPCInfo/atomics/gpu/basic.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble> rd{d};\n\n#endif\n\n#define ALSO_NO 1\n\nint main(void)\n{\n const int n{10000};\n#ifdef _OPENMP\n #else\n #pragma acc parallel loop\n\n for (int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AndreiCNitu/HPC/lattice-boltzmann/openMP/lbm.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_u), reduction(+:tot_cells)", "context_chars": 100, "text": "; /* accumulated magnitudes of velocity for each cell */\n\n /* loop over the cells in the grid */\n for (int jj = 0; jj < params.ny; jj++) {\n __assume_aligned(cells->speed_0, 64);\n __assume_aligned(cells->speed_1, 64);\n __assume_aligned(cells->speed_2, 64);\n __assume_aligned(cells->speed_3, 64);\n __assume_aligned(cells->speed_4, 64);\n __assume_aligned(cells->speed_5, 64);\n __assume_aligned(cells->speed_6, 64);\n __assume_aligned(cells->speed_7, 64);\n __assume_aligned(cells->speed_8, 64);\n __assume_aligned(tmp_cells->speed_0, 64);\n __assume_aligned(tmp_cells->speed_1, 64);\n __assume_aligned(tmp_cells->speed_2, 64);\n __assume_aligned(tmp_cells->speed_3, 64);\n __assume_aligned(tmp_cells->speed_4, 64);\n __assume_aligned(tmp_cells->speed_5, 64);\n __assume_aligned(tmp_cells->speed_6, 64);\n __assume_aligned(tmp_cells->speed_7, 64);\n __assume_aligned(tmp_cells->speed_8, 64);\n #pragma omp simd\n for (int ii = 0; ii < params.nx; ii++) {\n // PROPAGATION STEP:\n /* determine indices of axis-direction neighbours\n ** respecting periodic boundary conditions (wrap around) */\n const int y_n = (jj + 1) % params.ny;\n const int x_e = (ii + 1) % params.nx;\n const int y_s = (jj == 0) ? (jj + params.ny - 1) : (jj - 1);\n const int x_w = (ii == 0) ? 
(ii + params.nx - 1) : (ii - 1);\n\n /* propagate densities from neighbouring cells, following\n ** appropriate directions of travel and writing into\n ** scratch space grid */\n const float s0 = cells->speed_0[ii + jj*params.nx]; /* central cell, no movement */\n const float s1 = cells->speed_1[x_w + jj*params.nx]; /* east */\n const float s2 = cells->speed_2[ii + y_s*params.nx]; /* north */\n const float s3 = cells->speed_3[x_e + jj*params.nx]; /* west */\n const float s4 = cells->speed_4[ii + y_n*params.nx]; /* south */\n const float s5 = cells->speed_5[x_w + y_s*params.nx]; /* north-east */\n const float s6 = cells->speed_6[x_e + y_s*params.nx]; /* north-west */\n const float s7 = cells->speed_7[x_e + y_n*params.nx]; /* south-west */\n const float s8 = cells->speed_8[x_w + y_n*params.nx]; /* south-east */\n\n // COLLISION STEP:\n /* compute local density total */\n const float local_density = s0 + s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8;\n\n /* compute x velocity component */\n const float u_x = (s1 + s5 + s8 - (s3 + s6 + s7)) / local_density;\n\n /* compute y velocity component */\n const float u_y = (s2 + s5 + s6 - (s4 + s7 + s8)) / local_density;\n\n /* velocity squared */\n const float u_sq = u_x * u_x + u_y * u_y;\n\n /* directional velocity components */\n float u[NSPEEDS];\n u[1] = u_x; /* east */\n u[2] = u_y; /* north */\n u[3] = - u_x; /* west */\n u[4] = - u_y; /* south */\n u[5] = u_x + u_y; /* north-east */\n u[6] = - u_x + u_y; /* north-west */\n u[7] = - u_x - u_y; /* south-west */\n u[8] = u_x - u_y; /* south-east */\n\n /* equilibrium densities */\n float d_equ[NSPEEDS];\n /* zero velocity density: weight w0 */\n d_equ[0] = w0 * local_density\n * (1.f - u_sq / (2.f * c_sq));\n /* axis speeds: weight w1 */\n d_equ[1] = w1 * local_density * (1.f + u[1] / c_sq\n + (u[1] * u[1]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[2] = w1 * local_density * (1.f + u[2] / c_sq\n + (u[2] * u[2]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[3] = w1 * local_density * (1.f + u[3] / c_sq\n + (u[3] * u[3]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[4] = w1 * local_density * (1.f + u[4] / c_sq\n + (u[4] * u[4]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n /* diagonal speeds: weight w2 */\n d_equ[5] = w2 * local_density * (1.f + u[5] / c_sq\n + (u[5] * u[5]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[6] = w2 * local_density * (1.f + u[6] / c_sq\n + (u[6] * u[6]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[7] = w2 * local_density * (1.f + u[7] / c_sq\n + (u[7] * u[7]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n d_equ[8] = w2 * local_density * (1.f + u[8] / c_sq\n + (u[8] * u[8]) / (2.f * c_sq * c_sq)\n - u_sq / (2.f * c_sq));\n\n /* relaxation step */\n const float t0 = (obstacles[jj*params.nx + ii] != 0) ? s0 : (s0 + params.omega * (d_equ[0] - s0));\n const float t1 = (obstacles[jj*params.nx + ii] != 0) ? s3 : (s1 + params.omega * (d_equ[1] - s1));\n const float t2 = (obstacles[jj*params.nx + ii] != 0) ? s4 : (s2 + params.omega * (d_equ[2] - s2));\n const float t3 = (obstacles[jj*params.nx + ii] != 0) ? s1 : (s3 + params.omega * (d_equ[3] - s3));\n const float t4 = (obstacles[jj*params.nx + ii] != 0) ? s2 : (s4 + params.omega * (d_equ[4] - s4));\n const float t5 = (obstacles[jj*params.nx + ii] != 0) ? s7 : (s5 + params.omega * (d_equ[5] - s5));\n const float t6 = (obstacles[jj*params.nx + ii] != 0) ? s8 : (s6 + params.omega * (d_equ[6] - s6));\n const float t7 = (obstacles[jj*params.nx + ii] != 0) ? 
s5 : (s7 + params.omega * (d_equ[7] - s7));\n const float t8 = (obstacles[jj*params.nx + ii] != 0) ? s6 : (s8 + params.omega * (d_equ[8] - s8));\n\n // AVERAGE VELOCITIES STEP:\n /* local density total */\n const float local_density_v = t0 + t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8;\n\n /* x-component of velocity */\n const float u_x_v = (t1 + t5 + t8 - (t3 + t6 + t7)) / local_density_v;\n /* compute y velocity component */\n const float u_y_v = (t2 + t5 + t6 - (t4 + t7 + t8)) / local_density_v;\n\n /* accumulate the norm of x- and y- velocity components */\n tot_u += (obstacles[jj*params.nx + ii] != 0) ? 0 : sqrtf((u_x_v * u_x_v) + (u_y_v * u_y_v));\n /* increase counter of inspected cells */\n tot_cells += (obstacles[jj*params.nx + ii] != 0) ? 0 : 1;\n\n tmp_cells->speed_0[ii + jj*params.nx] = t0;\n tmp_cells->speed_1[ii + jj*params.nx] = t1;\n tmp_cells->speed_2[ii + jj*params.nx] = t2;\n tmp_cells->speed_3[ii + jj*params.nx] = t3;\n tmp_cells->speed_4[ii + jj*params.nx] = t4;\n tmp_cells->speed_5[ii + jj*params.nx] = t5;\n tmp_cells->speed_6[ii + jj*params.nx] = t6;\n tmp_cells->speed_7[ii + jj*params.nx] = t7;\n tmp_cells->speed_8[ii + jj*params.nx] = t8;\n }\n } #pragma omp parallel for reduction(+:tot_u), reduction(+:tot_cells)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AndreiCNitu/HPC/lattice-boltzmann/openMP/lbm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * 4.f / 9.f;\n float w1 = params->density / 9.f;\n float w2 = params->density / 36.f;\n\n for (int jj = 0; jj < params->ny; jj++) {\n for (int ii = 0; ii < params->nx; ii++) {\n /* centre */\n (*cells_ptr)->speed_0[ii + jj*params->nx] = w0;\n /* axis directions */\n (*cells_ptr)->speed_1[ii + jj*params->nx] = w1;\n (*cells_ptr)->speed_2[ii + jj*params->nx] = w1;\n (*cells_ptr)->speed_3[ii + jj*params->nx] = w1;\n (*cells_ptr)->speed_4[ii + jj*params->nx] = w1;\n /* diagonals */\n (*cells_ptr)->speed_5[ii + jj*params->nx] = w2;\n (*cells_ptr)->speed_6[ii + jj*params->nx] = w2;\n (*cells_ptr)->speed_7[ii + jj*params->nx] = w2;\n (*cells_ptr)->speed_8[ii + jj*params->nx] = w2;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AndreiCNitu/HPC/lattice-boltzmann/openMP/lbm.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "peed_8[ii + jj*params->nx] = w2;\n }\n }\n\n /* first set all cells in obstacle array to zero */\n for (int jj = 0; jj < params->ny; jj++) {\n for (int ii = 0; ii < params->nx; ii++) {\n (*obstacles_ptr)[ii + jj*params->nx] = 0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/applying_functions.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "();\n //start parallel\n\n\n //printf(\"thread number %d\\n\",omp_get_thread_num());\n for(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/PI_estimation.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nb_threads) private(xi) reduction(+:sum) ", "context_chars": 100, "text": "{\n long int i;\n double pi_value = 0.0;\n double sum = 0.0;\n double xi, h = (b - a) / N;\n\nfor (i = 0; i < N; i++) {\n xi = a + h*i;\n sum += f(xi);\n } #pragma omp parallel for num_threads(nb_threads) private(xi) reduction(+:sum) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/sum_of_array.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "d_parallel);\n sum = 0;\n //start parallel with reduction\n start_parallel = omp_get_wtime();\nfor (i = 0; i < n; i++) {\n //see what index each thread is working on\n //printf(\"thread %d -> %d\\n\", omp_get_thread_num(), i);\n sum += tt[i];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/matrix_addition.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "s of n obviously the speedup will remarkably increase.\n double start_parallel = omp_get_wtime();\nfor (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n //see which thread handling entry (i,j)\n //printf(\"thread %d working on (%d,%d)\\n\", omp_get_thread_num(), i, j);\n c[i][j] = a[i][j] + b[i][j];\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/solver.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) collapse(2) private(j, k, t, sum) shared(mat_A, mat_B, mat_C)", "context_chars": 100, "text": " t, sum;\n omp_set_dynamic(0);\n omp_set_num_threads(num_threads);\n start = omp_get_wtime();\nfor (i = 0; i < lines_a; i++)\n {\n for (j = 0; j < columns_b; j++)\n {\n sum = 0;\n for (k = 0; k < columns_a; k++)\n {\n t = (mat_A[i][k] * mat_B[k][j]);\n sum += t;\n }\n mat_C[i][j] = sum;\n }\n } #pragma omp parallel for schedule(guided) collapse(2) private(j, k, t, sum) shared(mat_A, mat_B, mat_C)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/solver.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt));\n struct FlatArraysCouple flat_array_couple = {a, b};\n omp_set_num_threads(num_threads);\nfor (int i = 0; i < lines_a; i++)\n {\n for (int j = 0; j < columns_a; j++)\n {\n a[i * columns_a + j] = matrixA[i][j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/solver.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0; j < columns_a; j++)\n {\n a[i * columns_a + j] = matrixA[i][j];\n }\n }\nfor (int i = 0; i < lines_b; i++)\n {\n for (int j = 0; j < columns_b; j++)\n {\n b[j * lines_b + i] = matrixB[i][j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/mpi_parallelized_scatter_gather.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) private(j) shared(a)", "context_chars": 100, "text": "rtMat_parralel(int **matrixA, int **matrixB, int *a, int *b)\n{\n int i, j;\n#pragma omp parallel\n {\nfor (i = 0; i < ci.lines_a; i++)\n {\n for (j = 0; j < ci.columns_a; j++)\n {\n a[i * ci.columns_a + j] = matrixA[i][j];\n }\n } #pragma omp parallel for schedule(guided) private(j) shared(a)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/mpi_parallelized_scatter_gather.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) private(j) shared(b)", "context_chars": 100, "text": " = 0; j < ci.columns_a; 
j++)\n {\n a[i * ci.columns_a + j] = matrixA[i][j];\n }\n }\nfor (i = 0; i < ci.lines_b; i++)\n {\n for (j = 0; j < ci.columns_b; j++)\n {\n b[j * ci.lines_b + i] = matrixB[i][j];\n }\n } #pragma omp parallel for schedule(guided) private(j) shared(b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dainerx/parallel-distributed-computing-C/MultMat/mpi_send_recv.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) private(i, j) shared(actual_C, mat_A, mat_B)", "context_chars": 100, "text": ", MPI_INT, root, TAG_MASTER, MPI_COMM_WORLD, &status);\n\n// Every worker's compute its part of matC.\nfor (k = 0; k < ci.columns_b; k++)\n for (i = 0; i < rows; i++)\n {\n actual_C[i][k] = 0.0;\n for (j = 0; j < ci.columns_a; j++)\n {\n actual_C[i][k] = actual_C[i][k] + mat_A[i][j] * mat_B[j][k];\n }\n } #pragma omp parallel for schedule(guided) private(i, j) shared(actual_C, mat_A, mat_B)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Reordered.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "threads+1) * sizeof(int));\n int stridennz = ceil((double)submatrix.nnznum/(double)nthreads);\n for (int tid = 0; tid <= nthreads; tid++)\n {\n int boundary = tid * stridennz;\n boundary = boundary > submatrix.nnznum ? submatrix.nnznum : boundary;\n csrSplitter[tid] = binary_search_right_boundary_kernel(submatrix.rowptr, boundary, submatrix.rownum + 1) - 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Reordered.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " q++)\n {\n MPI_Barrier(MPI_COMM_WORLD);\n if(id==0) gettimeofday(&t1, NULL);\n for (int tid = 0; tid < nthreads; tid++)\n {\n for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)\n {\n y[u] = 0;\n for (int h = submatrix.rowptr[u]; h < submatrix.rowptr[u+1]; h++)\n {\n y[u] += submatrix.val[h] * x[submatrix.colidx[h]];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Reordered.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "usec - t1.tv_usec) / 1000.0);\n }\n // for (int q = 0; q < NTIMES; q++)\n // {\n // // for (int i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ads+1) * sizeof(int));\n int stridennz = ceil((double)sub_local_mtx.nnznum/(double)nthreads);\n for (int tid = 0; tid <= nthreads; tid++)\n {\n int boundary = tid * stridennz;\n boundary = boundary > sub_local_mtx.nnznum ? 
sub_local_mtx.nnznum : boundary;\n csrSplitter[tid] = binary_search_right_boundary_kernel(sub_local_mtx.rowptr, boundary, sub_local_mtx.rownum + 1) - 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " MPI_Barrier(MPI_COMM_WORLD);\n if(id==0) gettimeofday(&t1,NULL);\n// Compute local spmv\n// // for (int i = 0; i < sub_local_mtx.rownum; i++)\n// {\n// local_y[i]=0;\n// for (int j = sub_local_mtx.rowptr[i]; j < sub_local_mtx.rowptr[i + 1]; j++)\n// {\n// local_y[i] += sub_local_mtx.val[j] * x[sub_local_mtx.colidx[j]];\n// }\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "al_y[i] += sub_local_mtx.val[j] * x[sub_local_mtx.colidx[j]];\n// }\n// }\n for (int tid = 0; tid < nthreads; tid++)\n {\n for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)\n {\n local_y[u] = 0;\n for (int h = sub_local_mtx.rowptr[u]; h < sub_local_mtx.rowptr[u+1]; h++)\n {\n local_y[u] += sub_local_mtx.val[h] * x[sub_local_mtx.colidx[h]];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ur_time;\n }\n }\n\n stridennz = ceil((double)sub_remote_mtx.nnznum/(double)nthreads);\n for (int tid = 0; tid <= nthreads; tid++)\n {\n int boundary = tid * stridennz;\n boundary = boundary > sub_remote_mtx.nnznum ? sub_remote_mtx.nnznum : boundary;\n csrSplitter[tid] = binary_search_right_boundary_kernel(sub_remote_mtx.rowptr, boundary, sub_remote_mtx.rownum + 1) - 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dary, sub_remote_mtx.rownum + 1) - 1;\n }\n\n for (int iter = 0; iter < NTIMES; iter++)\n {\n// // for (int i = 0; i < sub_remote_mtx.rownum; i++) {\n// remote_y[i]=0;\n// for (int j = sub_remote_mtx.rowptr[i]; j < sub_remote_mtx.rowptr[i + 1]; j++)\n// remote_y[i] += sub_remote_mtx.val[j] * x[sub_remote_mtx.colidx[j]];\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV_Balanced.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "// }\n MPI_Barrier(MPI_COMM_WORLD);\n if(id==0) gettimeofday(&t1,NULL);\n for (int tid = 0; tid < nthreads; tid++)\n {\n for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)\n {\n remote_y[u] = 0;\n for (int h = sub_remote_mtx.rowptr[u]; h < sub_remote_mtx.rowptr[u+1]; h++)\n {\n remote_y[u] += sub_remote_mtx.val[h] * x[sub_remote_mtx.colidx[h]];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "threads+1) * sizeof(int));\n int stridennz = ceil((double)submatrix.nnznum/(double)nthreads);\n for (int tid = 0; tid <= nthreads; tid++)\n {\n int boundary = tid * stridennz;\n boundary = boundary > submatrix.nnznum ? 
submatrix.nnznum : boundary;\n csrSplitter[tid] = binary_search_right_boundary_kernel(submatrix.rowptr, boundary, submatrix.rownum + 1) - 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " q++)\n {\n MPI_Barrier(MPI_COMM_WORLD);\n if(id==0) gettimeofday(&t1, NULL);\n for (int tid = 0; tid < nthreads; tid++)\n {\n for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)\n {\n y[u] = 0;\n for (int h = submatrix.rowptr[u]; h < submatrix.rowptr[u+1]; h++)\n {\n y[u] += submatrix.val[h] * x[submatrix.colidx[h]];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/xxyux/Distributed-SpMV/DistSpMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "usec - t1.tv_usec) / 1000.0);\n }\n // for (int q = 0; q < NTIMES; q++)\n // {\n // // for (int i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/jurassic/src/jurassic.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(ctl,tbl,id,nu,f,n,dnu)", "context_chars": 100, "text": "1; i < n; i++)\n dnu = MIN(dnu, nu[i] - nu[i - 1]);\n\n /* Compute source function table... */\nfor (int it = 0; it < TBLNS; it++) {\n\n /* Set temperature... */\n tbl->st[it] = LIN(0.0, TMIN, TBLNS - 1.0, TMAX, (double) it);\n\n /* Integrate Planck function... */\n double fsum = tbl->sr[it][id] = 0;\n for (double fnu = nu[0]; fnu <= nu[n - 1]; fnu += dnu) {\n\tint i = locate_irr(nu, n, fnu);\n\tdouble ff = LIN(nu[i], f[i], nu[i + 1], f[i + 1], fnu);\n\tfsum += ff;\n\ttbl->sr[it][id] += ff * planck(tbl->st[it], fnu);\n }\n tbl->sr[it][id] /= fsum;\n } #pragma omp parallel for default(none) shared(ctl,tbl,id,nu,f,n,dnu)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/jurassic/src/jurassic.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(ctl,atm,obs,k,x0,yy0,n,m,iqa) private(x1, yy1, atm1, obs1)", "context_chars": 100, "text": "nitialize kernel matrix... */\n gsl_matrix_set_zero(k);\n\n /* Loop over state vector elements... */\nfor (size_t j = 0; j < n; j++) {\n\n /* Allocate... */\n x1 = gsl_vector_alloc(n);\n yy1 = gsl_vector_alloc(m);\n ALLOC(atm1, atm_t, 1);\n ALLOC(obs1, obs_t, 1);\n\n /* Set perturbation size... */\n double h;\n if (iqa[j] == IDXP)\n h = MAX(fabs(0.01 * gsl_vector_get(x0, j)), 1e-7);\n else if (iqa[j] == IDXT)\n h = 1.0;\n else if (iqa[j] >= IDXQ(0) && iqa[j] < IDXQ(ctl->ng))\n h = MAX(fabs(0.01 * gsl_vector_get(x0, j)), 1e-15);\n else if (iqa[j] >= IDXK(0) && iqa[j] < IDXK(ctl->nw))\n h = 1e-4;\n else if (iqa[j] == IDXCLZ || iqa[j] == IDXCLDZ)\n h = 1.0;\n else if (iqa[j] >= IDXCLK(0) && iqa[j] < IDXCLK(ctl->ncl))\n h = 1e-4;\n else if (iqa[j] == IDXSFZ)\n h = 0.1;\n else if (iqa[j] == IDXSFP)\n h = 10.0;\n else if (iqa[j] == IDXSFT)\n h = 1.0;\n else if (iqa[j] >= IDXSFEPS(0) && iqa[j] < IDXSFEPS(ctl->nsf))\n h = 1e-2;\n else\n ERRMSG(\"Cannot set perturbation size!\");\n\n /* Disturb state vector element... */\n gsl_vector_memcpy(x1, x0);\n gsl_vector_set(x1, j, gsl_vector_get(x1, j) + h);\n copy_atm(ctl, atm1, atm, 0);\n copy_obs(ctl, obs1, obs, 0);\n x2atm(ctl, x1, atm1);\n\n /* Compute radiance for disturbed atmospheric data... */\n formod(ctl, atm1, obs1);\n\n /* Compose measurement vector for disturbed radiance data... 
*/\n obs2y(ctl, obs1, yy1, NULL, NULL);\n\n /* Compute derivatives... */\n for (size_t i = 0; i < m; i++)\n gsl_matrix_set(k, i, j,\n\t\t (gsl_vector_get(yy1, i) - gsl_vector_get(yy0, i)) / h);\n\n /* Free... */\n gsl_vector_free(x1);\n gsl_vector_free(yy1);\n free(atm1);\n free(obs1);\n } #pragma omp parallel for default(none) shared(ctl,atm,obs,k,x0,yy0,n,m,iqa) private(x1, yy1, atm1, obs1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ead binary data... */\n cms_sol_t *sol = cms_read_sol(cms_ptr, inout);\n\n /* Evaluate... */\nfor (size_t ix = 0; ix < nx; ix++)\n\tfor (size_t iy = 0; iy < ny; iy++) {\n\t double val, x[] = { lon[ix], lat[iy] };\n\t cms_eval(cms_ptr, sol, x, &val);\n\t array[ARRAY_3D(ix, iy, ny, ip, np)] = (float) val;\n\t} #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "\t inout);\n FREAD(sarray, unsigned short,\n\t nxy * nz,\n\t inout);\n\n /* Convert to float... */\nfor (size_t ixy = 0; ixy < nxy; ixy++)\n for (size_t iz = 0; iz < nz; iz++)\n\tarray[ixy * nz + iz]\n\t = (float) (sarray[ixy * nz + iz] * scl[iz] + off[iz]);\n }\n\n /* Compress array and output compressed stream... */\n else {\n\n /* Write info... */\n LOG(2, \"Write 3-D variable: %s (pack, RATIO= %g %%)\",\n\tvarname, 100. * sizeof(unsigned short) / sizeof(float));\n\n /* Get range... */\n for (size_t iz = 0; iz < nz; iz++) {\n min[iz] = array[iz];\n max[iz] = array[iz];\n }\n for (size_t ixy = 1; ixy < nxy; ixy++)\n for (size_t iz = 0; iz < nz; iz++) {\n\tif (array[ixy * nz + iz] < min[iz])\n\t min[iz] = array[ixy * nz + iz];\n\tif (array[ixy * nz + iz] > max[iz])\n\t max[iz] = array[ixy * nz + iz];\n }\n\n /* Get offset and scaling factor... */\n for (size_t iz = 0; iz < nz; iz++) {\n scl[iz] = (max[iz] - min[iz]) / 65533.;\n off[iz] = min[iz];\n }\n\n /* Convert to short... */\n#pragma omp parallel for default(shared)\n for (size_t ixy = 0; ixy < nxy; ixy++)\n for (size_t iz = 0; iz < nz; iz++)\n\tif (scl[iz] != 0)\n\t sarray[ixy * nz + iz] = (unsigned short)\n\t ((array[ixy * nz + iz] - off[iz]) / scl[iz] + .5);\n\telse\n\t sarray[ixy * nz + iz] = 0;\n\n /* Write data... */\n FWRITE(&scl, double,\n\t nz,\n\t inout);\n FWRITE(&off, double,\n\t nz,\n\t inout);\n FWRITE(sarray, unsigned short,\n\t nxy * nz,\n\t inout);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "l[iz] = (max[iz] - min[iz]) / 65533.;\n off[iz] = min[iz];\n }\n\n /* Convert to short... */\nfor (size_t ixy = 0; ixy < nxy; ixy++)\n for (size_t iz = 0; iz < nz; iz++)\n\tif (scl[iz] != 0)\n\t sarray[ixy * nz + iz] = (unsigned short)\n\t ((array[ixy * nz + iz] - off[iz]) / scl[iz] + .5);\n\telse\n\t sarray[ixy * nz + iz] = 0;\n\n /* Write data... */\n FWRITE(&scl, double,\n\t nz,\n\t inout);\n FWRITE(&off, double,\n\t nz,\n\t inout);\n FWRITE(sarray, unsigned short,\n\t nxy * nz,\n\t inout);\n }\n\n /* Free... 
*/\n free(sarray);\n}\n\n/*****************************************************************************/\n\n#ifdef ZFP\nvoid compress_zfp(\n char *varname,\n float *array,\n int nx,\n int ny,\n int nz,\n int precision,\n double tolerance,\n int decompress,\n FILE * inout) {\n\n zfp_type type;\t\t/* array scalar type */\n zfp_field *field;\t\t/* array meta data */\n zfp_stream *zfp;\t\t/* compressed stream */\n void *buffer;\t\t\t/* storage for compressed stream */\n size_t bufsize;\t\t/* byte size of compressed buffer */\n bitstream *stream;\t\t/* bit stream to write to or read from */\n size_t zfpsize;\t\t/* byte size of compressed stream */\n\n /* Allocate meta data for the 3D array a[nz][ny][nx]... */\n type = zfp_type_float;\n field = zfp_field_3d(array, type, (uint) nx, (uint) ny, (uint) nz);\n\n /* Allocate meta data for a compressed stream... */\n zfp = zfp_stream_open(NULL);\n\n /* Set compression mode... */\n int actual_prec = 0;\n double actual_tol = 0;\n if (precision > 0)\n actual_prec = (int) zfp_stream_set_precision(zfp, (uint) precision);\n else if (tolerance > 0)\n actual_tol = zfp_stream_set_accuracy(zfp, tolerance);\n else\n ERRMSG(\"Set precision or tolerance!\");\n\n /* Allocate buffer for compressed data... */\n bufsize = zfp_stream_maximum_size(zfp, field);\n buffer = malloc(bufsize);\n\n /* Associate bit stream with allocated buffer... */\n stream = stream_open(buffer, bufsize);\n zfp_stream_set_bit_stream(zfp, stream);\n zfp_stream_rewind(zfp);\n\n /* Read compressed stream and decompress array... */\n if (decompress) {\n FREAD(&zfpsize, size_t,\n\t 1,\n\t inout);\n if (fread(buffer, 1, zfpsize, inout) != zfpsize)\n ERRMSG(\"Error while reading zfp data!\");\n if (!zfp_decompress(zfp, field)) {\n ERRMSG(\"Decompression failed!\");\n }\n LOG(2, \"Read 3-D variable: %s \"\n\t\"(zfp, PREC= %d, TOL= %g, RATIO= %g %%)\",\n\tvarname, actual_prec, actual_tol,\n\t(100. * (double) zfpsize) / (double) (nx * ny * nz));\n }\n\n /* Compress array and output compressed stream... */\n else {\n zfpsize = zfp_compress(zfp, field);\n if (!zfpsize) {\n ERRMSG(\"Compression failed!\");\n } else {\n FWRITE(&zfpsize, size_t,\n\t 1,\n\t inout);\n if (fwrite(buffer, 1, zfpsize, inout) != zfpsize)\n\tERRMSG(\"Error while writing zfp data!\");\n }\n LOG(2, \"Write 3-D variable: %s \"\n\t\"(zfp, PREC= %d, TOL= %g, RATIO= %g %%)\",\n\tvarname, actual_prec, actual_tol,\n\t(100. * (double) zfpsize) / (double) (nx * ny * nz));\n }\n\n /* Free... 
*/\n zfp_field_free(field);\n zfp_stream_close(zfp);\n stream_close(stream);\n free(buffer);\n} #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ci,cw)", "context_chars": 100, "text": "s,\n double *zs) {\n\n INTPOL_INIT;\n\n ctl->met_tropo = met_tropo;\n read_met_tropo(ctl, clim, met);\nfor (int ix = 0; ix < nx; ix++)\n for (int iy = 0; iy < ny; iy++) {\n intpol_met_space_2d(met, met->pt, lons[ix], lats[iy],\n\t\t\t &pt[iy * nx + ix], ci, cw, 1);\n intpol_met_space_2d(met, met->ps, lons[ix], lats[iy],\n\t\t\t &ps[iy * nx + ix], ci, cw, 0);\n intpol_met_space_2d(met, met->zs, lons[ix], lats[iy],\n\t\t\t &zs[iy * nx + ix], ci, cw, 0);\n intpol_met_space_3d(met, met->z, pt[iy * nx + ix], lons[ix],\n\t\t\t lats[iy], &zt[iy * nx + ix], ci, cw, 1);\n intpol_met_space_3d(met, met->t, pt[iy * nx + ix], lons[ix],\n\t\t\t lats[iy], &tt[iy * nx + ix], ci, cw, 0);\n intpol_met_space_3d(met, met->h2o, pt[iy * nx + ix], lons[ix],\n\t\t\t lats[iy], &qt[iy * nx + ix], ci, cw, 0);\n intpol_met_space_3d(met, met->o3, pt[iy * nx + ix], lons[ix],\n\t\t\t lats[iy], &o3t[iy * nx + ix], ci, cw, 0);\n } #pragma omp parallel for default(shared) private(ci,cw)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "tm_t * atm) {\n\n /* Initialize pressure consistent with zeta... */\n if (ctl->vert_coord_ap == 1) {\nfor (int ip = 0; ip < atm->np; ip++) {\n INTPOL_INIT;\n intpol_met_4d_coord(met0, met0->zetal, met0->pl, met1, met1->zetal,\n\t\t\t met1->pl, atm->time[ip], atm->q[ctl->qnt_zeta][ip],\n\t\t\t atm->lon[ip], atm->lat[ip], &atm->p[ip], ci, cw, 1);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": " (ctl->chemgrid_lat1 - ctl->chemgrid_lat0) / ctl->chemgrid_ny;\n\n /* Set vertical coordinates... */\nfor (int iz = 0; iz < ctl->chemgrid_nz; iz++) {\n z[iz] = ctl->chemgrid_z0 + dz * (iz + 0.5);\n press[iz] = P(z[iz]);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": " double t0 = tt - 0.5 * ctl->dt_mod;\n double t1 = tt + 0.5 * ctl->dt_mod;\n\n /* Get indices... 
*/\nfor (int ip = 0; ip < atm->np; ip++) {\n ixs[ip] = (int) ((atm->lon[ip] - ctl->chemgrid_lon0) / dlon);\n iys[ip] = (int) ((atm->lat[ip] - ctl->chemgrid_lat0) / dlat);\n izs[ip] = (int) ((Z(atm->p[ip]) - ctl->chemgrid_z0) / dz);\n if (atm->time[ip] < t0 || atm->time[ip] > t1\n\t|| ixs[ip] < 0 || ixs[ip] >= ctl->chemgrid_nx\n\t|| iys[ip] < 0 || iys[ip] >= ctl->chemgrid_ny\n\t|| izs[ip] < 0 || izs[ip] >= ctl->chemgrid_nz)\n izs[ip] = -1;\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "for (int ix = 0; ix < ctl->chemgrid_nx; ix++)\n lon[ix] = ctl->chemgrid_lon0 + dlon * (ix + 0.5);\nfor (int iy = 0; iy < ctl->chemgrid_ny; iy++) {\n lat[iy] = ctl->chemgrid_lat0 + dlat * (iy + 0.5);\n area[iy] = dlat * dlon * SQR(RE * M_PI / 180.)\n * cos(lat[iy] * M_PI / 180.);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "zs[ip], ctl->chemgrid_nz)]\n\t+= atm->q[ctl->qnt_m][ip];\n\n /* Assign grid data to air parcels ... */\nfor (int ip = 0; ip < atm->np; ip++)\n if (izs[ip] >= 0) {\n\n /* Interpolate temperature... */\n double temp;\n INTPOL_INIT;\n intpol_met_time_3d(met0, met0->t, met1, met1->t, tt, press[izs[ip]],\n\t\t\t lon[ixs[ip]], lat[iys[ip]], &temp, ci, cw, 1);\n\n /* Set mass... */\n double m = mass[ARRAY_3D(ixs[ip], iys[ip], ctl->chemgrid_ny,\n\t\t\t izs[ip], ctl->chemgrid_nz)];\n\n /* Calculate volume mixing ratio... */\n atm->q[ctl->qnt_Cx][ip] = MA / ctl->molmass * m\n\t/ (1e9 * RHO(press[izs[ip]], temp) * area[iys[ip]] * dz);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "module_chem_init(\n ctl_t * ctl,\n clim_t * clim,\n met_t * met0,\n met_t * met1,\n atm_t * atm) {\n\nfor (int ip = 0; ip < atm->np; ip++) {\n\n /* Set H2O and O3 using meteo data... */\n INTPOL_INIT;\n if (ctl->qnt_Ch2o >= 0) {\n double h2o;\n INTPOL_3D(h2o, 1);\n SET_ATM(qnt_Ch2o, h2o);\n }\n if (ctl->qnt_Co3 >= 0) {\n double o3;\n INTPOL_3D(o3, 1);\n SET_ATM(qnt_Co3, o3);\n }\n\n /* Set radical species... 
*/\n SET_ATM(qnt_Coh, clim_oh(ctl, clim, atm->time[ip],\n\t\t\t atm->lon[ip], atm->lat[ip], atm->p[ip]));\n SET_ATM(qnt_Cho2, clim_zm(&clim->ho2, atm->time[ip],\n\t\t\t atm->lat[ip], atm->p[ip]));\n SET_ATM(qnt_Ch2o2, clim_zm(&clim->h2o2, atm->time[ip],\n\t\t\t atm->lat[ip], atm->p[ip]));\n SET_ATM(qnt_Co1d, clim_zm(&clim->o1d, atm->time[ip],\n\t\t\t atm->lat[ip], atm->p[ip]));\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": " acc data present(ctl,clim,atm,ixs,iys,izs)\n#pragma acc parallel loop independent gang vector\n#else\nfor (int ip = 0; ip < np; ip++) {\n ixs[ip] = (int) ((atm->lon[ip] - ctl->mixing_lon0) / dlon);\n iys[ip] = (int) ((atm->lat[ip] - ctl->mixing_lat0) / dlat);\n izs[ip] = (int) ((Z(atm->p[ip]) - ctl->mixing_z0) / dz);\n if (atm->time[ip] < t0 || atm->time[ip] > t1\n\t|| ixs[ip] < 0 || ixs[ip] >= ctl->mixing_nx\n\t|| iys[ip] < 0 || iys[ip] >= ctl->mixing_ny\n\t|| izs[ip] < 0 || izs[ip] >= ctl->mixing_nz)\n izs[ip] = -1;\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#pragma acc parallel loop independent gang vector\n#else\n#ifdef __NVCOMPILER\n#pragma novector\n#endif\nfor (int i = 0; i < ngrid; i++) {\n count[i] = 0;\n cmean[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#pragma acc parallel loop independent gang vector\n#else\n#ifdef __NVCOMPILER\n#pragma novector\n#endif\nfor (int i = 0; i < ngrid; i++)\n if (count[i] > 0)\n cmean[i] /= count[i];\n\n /* Calculate interparcel mixing... */\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\n#pragma omp parallel for\n\n for (int ip = 0; ip < np; ip++)\n if (izs[ip] >= 0) {\n\n /* Set mixing parameter... */\n double mixparam = 1.0;\n if (ctl->mixing_trop < 1 || ctl->mixing_strat < 1) {\n\tdouble w =\n\t tropo_weight(clim, atm->time[ip], atm->lat[ip], atm->p[ip]);\n\tmixparam = w * ctl->mixing_trop + (1 - w) * ctl->mixing_strat;\n }\n\n /* Adjust quantity... */\n atm->q[qnt_idx][ip] +=\n\t(cmean\n\t [ARRAY_3D(ixs[ip], iys[ip], ctl->mixing_ny, izs[ip], ctl->mixing_nz)]\n\t - atm->q[qnt_idx][ip]) * mixparam;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "te interparcel mixing... */\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\nfor (int ip = 0; ip < np; ip++)\n if (izs[ip] >= 0) {\n\n /* Set mixing parameter... */\n double mixparam = 1.0;\n if (ctl->mixing_trop < 1 || ctl->mixing_strat < 1) {\n\tdouble w =\n\t tropo_weight(clim, atm->time[ip], atm->lat[ip], atm->p[ip]);\n\tmixparam = w * ctl->mixing_trop + (1 - w) * ctl->mixing_strat;\n }\n\n /* Adjust quantity... 
*/\n atm->q[qnt_idx][ip] +=\n\t(cmean\n\t [ARRAY_3D(ixs[ip], iys[ip], ctl->mixing_ny, izs[ip], ctl->mixing_nz)]\n\t - atm->q[qnt_idx][ip]) * mixparam;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "erators... */\n if (ctl->rng_type == 0) {\n\n /* Uniform distribution... */\n if (method == 0) {\nfor (size_t i = 0; i < n; ++i)\n\trs[i] = gsl_rng_uniform(rng[omp_get_thread_num()]);\n }\n\n /* Normal distribution... */\n else if (method == 1) {\n#pragma omp parallel for default(shared)\n for (size_t i = 0; i < n; ++i)\n\trs[i] = gsl_ran_gaussian_ziggurat(rng[omp_get_thread_num()], 1.0);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "orm(rng[omp_get_thread_num()]);\n }\n\n /* Normal distribution... */\n else if (method == 1) {\nfor (size_t i = 0; i < n; ++i)\n\trs[i] = gsl_ran_gaussian_ziggurat(rng[omp_get_thread_num()], 1.0);\n }\n\n /* Update of random numbers on device... */\n#ifdef _OPENACC\n#pragma acc update device(rs[:n])\n\n }\n\n /* Use Squares random number generator (Widynski, 2022)... */\n else if (ctl->rng_type == 1) {\n\n /* Set key (don't change this!)... */\n const uint64_t key = 0xc8e4fd154ce32f6d;\n\n /* Uniform distribution... */\n#ifdef _OPENACC\n#pragma acc data present(rs)\n#pragma acc parallel loop independent gang vector\n#else\n#pragma omp parallel for default(shared)\n\n for (size_t i = 0; i < n + 1; ++i) {\n uint64_t r, t, x, y, z;\n y = x = (rng_ctr + i) * key;\n z = y + key;\n x = x * x + y;\n x = (x >> 32) | (x << 32);\n x = x * x + z;\n x = (x >> 32) | (x << 32);\n x = x * x + y;\n x = (x >> 32) | (x << 32);\n t = x = x * x + z;\n x = (x >> 32) | (x << 32);\n r = t ^ ((x * x + y) >> 32);\n rs[i] = (double) r / (double) UINT64_MAX;\n }\n rng_ctr += n + 1;\n\n /* Normal distribution... */\n if (method == 1) {\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\n#pragma omp parallel for default(shared)\n\n for (size_t i = 0; i < n; i += 2) {\n\tdouble r = sqrt(-2.0 * log(rs[i]));\n\tdouble phi = 2.0 * M_PI * rs[i + 1];\n\trs[i] = r * cosf((float) phi);\n\trs[i + 1] = r * sinf((float) phi);\n }\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ifdef _OPENACC\n#pragma acc data present(rs)\n#pragma acc parallel loop independent gang vector\n#else\nfor (size_t i = 0; i < n + 1; ++i) {\n uint64_t r, t, x, y, z;\n y = x = (rng_ctr + i) * key;\n z = y + key;\n x = x * x + y;\n x = (x >> 32) | (x << 32);\n x = x * x + z;\n x = (x >> 32) | (x << 32);\n x = x * x + y;\n x = (x >> 32) | (x << 32);\n t = x = x * x + z;\n x = (x >> 32) | (x << 32);\n r = t ^ ((x * x + y) >> 32);\n rs[i] = (double) r / (double) UINT64_MAX;\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": ". 
*/\n if (method == 1) {\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\nfor (size_t i = 0; i < n; i += 2) {\n\tdouble r = sqrt(-2.0 * log(rs[i]));\n\tdouble phi = 2.0 * M_PI * rs[i + 1];\n\trs[i] = r * cosf((float) phi);\n\trs[i + 1] = r * sinf((float) phi);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "f\n\n /* Get box index... */\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\nfor (int ip = 0; ip < np; ip++) {\n a[ip] =\n (double) ((locate_reg(met0->lon, met0->nx, atm->lon[ip]) * met0->ny +\n\t\t locate_reg(met0->lat, met0->ny, atm->lat[ip]))\n\t\t* met0->np + locate_irr(met0->p, met0->np, atm->p[ip]));\n p[ip] = ip;\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "p[0:np])\n#pragma acc data present(a,p,help)\n#pragma acc parallel loop independent gang vector\n#else\nfor (int ip = 0; ip < np; ip++)\n help[ip] = a[p[ip]];\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\n#pragma omp parallel for default(shared)\n\n for (int ip = 0; ip < np; ip++)\n a[ip] = help[ip];\n\n /* Free... */\n#ifdef _OPENACC\n#pragma acc exit data delete(help)\n\n free(help);\n}\n\n/*****************************************************************************/\n\nvoid module_timesteps(\n ctl_t * ctl,\n met_t * met0,\n atm_t * atm,\n double *dt,\n double t) {\n\n /* Set timer... */\n SELECT_TIMER(\"MODULE_TIMESTEPS\", \"PHYSICS\", NVTX_GPU);\n\n const double latmin = gsl_stats_min(met0->lat, 1, (size_t) met0->ny),\n latmax = gsl_stats_max(met0->lat, 1, (size_t) met0->ny);\n\n const int local =\n (fabs(met0->lon[met0->nx - 1] - met0->lon[0] - 360.0) >= 0.01);\n\n /* Loop over particles... */\n PARTICLE_LOOP(0, atm->np, 0, \"acc data present(ctl,atm,met0,dt)\") {\n\n /* Set time step for each air parcel... */\n if ((ctl->direction * (atm->time[ip] - ctl->t_start) >= 0\n\t && ctl->direction * (atm->time[ip] - ctl->t_stop) <= 0\n\t && ctl->direction * (atm->time[ip] - t) < 0))\n dt[ip] = t - atm->time[ip];\n else\n dt[ip] = 0.0;\n\n /* Check horizontal boundaries of local meteo data... */\n if (local && (atm->lon[ip] <= met0->lon[0]\n\t\t || atm->lon[ip] >= met0->lon[met0->nx - 1]\n\t\t || atm->lat[ip] <= latmin || atm->lat[ip] >= latmax))\n dt[ip] = 0.0;\n }\n} #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "+)\n help[ip] = a[p[ip]];\n#ifdef _OPENACC\n#pragma acc parallel loop independent gang vector\n#else\nfor (int ip = 0; ip < np; ip++)\n a[ip] = help[ip];\n\n /* Free... */\n#ifdef _OPENACC\n#pragma acc exit data delete(help)\n\n free(help);\n}\n\n/*****************************************************************************/\n\nvoid module_timesteps(\n ctl_t * ctl,\n met_t * met0,\n atm_t * atm,\n double *dt,\n double t) {\n\n /* Set timer... 
*/\n SELECT_TIMER(\"MODULE_TIMESTEPS\", \"PHYSICS\", NVTX_GPU);\n\n const double latmin = gsl_stats_min(met0->lat, 1, (size_t) met0->ny),\n latmax = gsl_stats_max(met0->lat, 1, (size_t) met0->ny);\n\n const int local =\n (fabs(met0->lon[met0->nx - 1] - met0->lon[0] - 360.0) >= 0.01);\n\n /* Loop over particles... */\n PARTICLE_LOOP(0, atm->np, 0, \"acc data present(ctl,atm,met0,dt)\") {\n\n /* Set time step for each air parcel... */\n if ((ctl->direction * (atm->time[ip] - ctl->t_start) >= 0\n\t && ctl->direction * (atm->time[ip] - ctl->t_stop) <= 0\n\t && ctl->direction * (atm->time[ip] - t) < 0))\n dt[ip] = t - atm->time[ip];\n else\n dt[ip] = 0.0;\n\n /* Check horizontal boundaries of local meteo data... */\n if (local && (atm->lon[ip] <= met0->lon[0]\n\t\t || atm->lon[ip] >= met0->lon[met0->nx - 1]\n\t\t || atm->lat[ip] <= latmin || atm->lat[ip] >= latmax))\n dt[ip] = 0.0;\n }\n} #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": " ERRMSG(\"MPTRAC was compiled without cmultiscale compression!\");\n#endif\n }\n\n /* Copy data... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n for (int ip = 0; ip < met->np; ip++) {\n\tvar[ix][iy][ip] = help[ARRAY_3D(ix, iy, met->ny, ip, met->np)];\n\tif (var[ix][iy][ip] < bound_min)\n\t var[ix][iy][ip] = bound_min;\n\telse if (var[ix][iy][ip] > bound_max)\n\t var[ix][iy][ip] = bound_max;\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "... */\n const double pfac = 1.01439, dz0 = RI / MA / G0 * log(pfac);\n\n /* Loop over columns... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Get potential temperature and water vapor at lowest 50 hPa... */\n int n = 0;\n double h2o = 0, t, theta = 0;\n double pbot = MIN(met->ps[ix][iy], met->p[0]);\n double ptop = pbot - 50.;\n for (int ip = 0; ip < met->np; ip++) {\n\tif (met->p[ip] <= pbot) {\n\t theta += THETA(met->p[ip], met->t[ix][iy][ip]);\n\t h2o += met->h2o[ix][iy][ip];\n\t n++;\n\t}\n\tif (met->p[ip] < ptop && n > 0)\n\t break;\n }\n theta /= n;\n h2o /= n;\n\n /* Cannot compute anything if water vapor is missing... */\n met->plcl[ix][iy] = NAN;\n met->plfc[ix][iy] = NAN;\n met->pel[ix][iy] = NAN;\n met->cape[ix][iy] = NAN;\n met->cin[ix][iy] = NAN;\n if (h2o <= 0)\n\tcontinue;\n\n /* Find lifted condensation level (LCL)... */\n ptop = P(20.);\n pbot = met->ps[ix][iy];\n do {\n\tmet->plcl[ix][iy] = (float) (0.5 * (pbot + ptop));\n\tt = theta / pow(1000. / met->plcl[ix][iy], 0.286);\n\tif (RH(met->plcl[ix][iy], t, h2o) > 100.)\n\t ptop = met->plcl[ix][iy];\n\telse\n\t pbot = met->plcl[ix][iy];\n } while (pbot - ptop > 0.1);\n\n /* Calculate CIN up to LCL... */\n INTPOL_INIT;\n double dcape, dz, h2o_env, t_env;\n double p = met->ps[ix][iy];\n met->cape[ix][iy] = met->cin[ix][iy] = 0;\n do {\n\tdz = dz0 * TVIRT(t, h2o);\n\tp /= pfac;\n\tt = theta / pow(1000. 
/ p, 0.286);\n\tintpol_met_space_3d(met, met->t, p, met->lon[ix], met->lat[iy],\n\t\t\t &t_env, ci, cw, 1);\n\tintpol_met_space_3d(met, met->h2o, p, met->lon[ix], met->lat[iy],\n\t\t\t &h2o_env, ci, cw, 0);\n\tdcape = 1e3 * G0 * (TVIRT(t, h2o) - TVIRT(t_env, h2o_env)) /\n\t TVIRT(t_env, h2o_env) * dz;\n\tif (dcape < 0)\n\t met->cin[ix][iy] += fabsf((float) dcape);\n } while (p > met->plcl[ix][iy]);\n\n /* Calculate level of free convection (LFC), equilibrium level (EL),\n and convective available potential energy (CAPE)... */\n dcape = 0;\n p = met->plcl[ix][iy];\n t = theta / pow(1000. / p, 0.286);\n ptop = 0.75 * clim_tropo(clim, met->time, met->lat[iy]);\n do {\n\tdz = dz0 * TVIRT(t, h2o);\n\tp /= pfac;\n\tt -= lapse_rate(t, h2o) * dz;\n\tdouble psat = PSAT(t);\n\th2o = psat / (p - (1. - EPS) * psat);\n\tintpol_met_space_3d(met, met->t, p, met->lon[ix], met->lat[iy],\n\t\t\t &t_env, ci, cw, 1);\n\tintpol_met_space_3d(met, met->h2o, p, met->lon[ix], met->lat[iy],\n\t\t\t &h2o_env, ci, cw, 0);\n\tdouble dcape_old = dcape;\n\tdcape = 1e3 * G0 * (TVIRT(t, h2o) - TVIRT(t_env, h2o_env)) /\n\t TVIRT(t_env, h2o_env) * dz;\n\tif (dcape > 0) {\n\t met->cape[ix][iy] += (float) dcape;\n\t if (!isfinite(met->plfc[ix][iy]))\n\t met->plfc[ix][iy] = (float) p;\n\t} else if (dcape_old > 0)\n\t met->pel[ix][iy] = (float) p;\n\tif (dcape < 0 && !isfinite(met->plfc[ix][iy]))\n\t met->cin[ix][iy] += fabsf((float) dcape);\n } while (p > ptop);\n\n /* Check results... */\n if (!isfinite(met->plfc[ix][iy]))\n\tmet->cin[ix][iy] = NAN;\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "T_CLOUD\", \"METPROC\", NVTX_READ);\n LOG(2, \"Calculate cloud data...\");\n\n /* Loop over columns... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Init... */\n met->pct[ix][iy] = NAN;\n met->pcb[ix][iy] = NAN;\n met->cl[ix][iy] = 0;\n\n /* Loop over pressure levels... */\n for (int ip = 0; ip < met->np - 1; ip++) {\n\n\t/* Check pressure... */\n\tif (met->p[ip] > met->ps[ix][iy] || met->p[ip] < P(20.))\n\t continue;\n\n\t/* Check ice water and liquid water content... */\n\tif (met->iwc[ix][iy][ip] > 0 || met->rwc[ix][iy][ip] > 0\n\t || met->lwc[ix][iy][ip] > 0 || met->swc[ix][iy][ip] > 0) {\n\n\t /* Get cloud top pressure ... */\n\t met->pct[ix][iy]\n\t = (float) (0.5 * (met->p[ip] + (float) met->p[ip + 1]));\n\n\t /* Get cloud bottom pressure ... */\n\t if (!isfinite(met->pcb[ix][iy]))\n\t met->pcb[ix][iy]\n\t = (float) (0.5 * (met->p[ip] + met->p[MAX(ip - 1, 0)]));\n\t}\n\n\t/* Get cloud water... */\n\tmet->cl[ix][iy] += (float)\n\t (0.5 * (met->lwc[ix][iy][ip] + met->lwc[ix][iy][ip + 1]\n\t\t + met->rwc[ix][iy][ip] + met->rwc[ix][iy][ip + 1]\n\t\t + met->iwc[ix][iy][ip] + met->iwc[ix][iy][ip + 1]\n\t\t + met->swc[ix][iy][ip] + met->swc[ix][iy][ip + 1])\n\t * 100. * (met->p[ip] - met->p[ip + 1]) / G0);\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "(met->lat[1] - met->lat[0]));\n sy = MIN(MAX(1, sy), met->ny / 2);\n\n /* Calculate background... 
*/\nfor (int ix = 0; ix < met->nx; ix++) {\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Calculate Cartesian coordinates... */\n double x0[3];\n geo2cart(0.0, met->lon[ix], met->lat[iy], x0);\n\n /* Calculate box size in longitude... */\n int sx =\n\t(int) (3. * DX2DEG(sigma, met->lat[iy]) /\n\t fabs(met->lon[1] - met->lon[0]));\n sx = MIN(MAX(1, sx), met->nx / 2);\n\n /* Init... */\n float wsum = 0;\n for (int ip = 0; ip < met->np; ip++) {\n\thelp->t[ix][iy][ip] = 0;\n\thelp->u[ix][iy][ip] = 0;\n\thelp->v[ix][iy][ip] = 0;\n\thelp->w[ix][iy][ip] = 0;\n }\n\n /* Loop over neighboring grid points... */\n for (int ix2 = ix - sx; ix2 <= ix + sx; ix2++) {\n\tint ix3 = ix2;\n\tif (ix3 < 0)\n\t ix3 += met->nx;\n\telse if (ix3 >= met->nx)\n\t ix3 -= met->nx;\n\tfor (int iy2 = MAX(iy - sy, 0);\n\t iy2 <= MIN(iy + sy, met->ny - 1); iy2++) {\n\n\t /* Calculate Cartesian coordinates... */\n\t double x1[3];\n\t geo2cart(0.0, met->lon[ix3], met->lat[iy2], x1);\n\n\t /* Calculate weighting factor... */\n\t float w = (float) exp(-DIST2(x0, x1) / tssq);\n\n\t /* Add data... */\n\t wsum += w;\n\t for (int ip = 0; ip < met->np; ip++) {\n\t help->t[ix][iy][ip] += w * met->t[ix3][iy2][ip];\n\t help->u[ix][iy][ip] += w * met->u[ix3][iy2][ip];\n\t help->v[ix][iy][ip] += w * met->v[ix3][iy2][ip];\n\t help->w[ix][iy][ip] += w * met->w[ix3][iy2][ip];\n\t }\n\t}\n }\n\n /* Normalize... */\n for (int ip = 0; ip < met->np; ip++) {\n\thelp->t[ix][iy][ip] /= wsum;\n\thelp->u[ix][iy][ip] /= wsum;\n\thelp->v[ix][iy][ip] /= wsum;\n\thelp->w[ix][iy][ip] /= wsum;\n }\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(3)", "context_chars": 100, "text": "x][iy][ip] /= wsum;\n\thelp->w[ix][iy][ip] /= wsum;\n }\n }\n }\n\n /* Subtract background... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n for (int ip = 0; ip < met->np; ip++) {\n\tmet->t[ix][iy][ip] -= help->t[ix][iy][ip];\n\tmet->u[ix][iy][ip] -= help->u[ix][iy][ip];\n\tmet->v[ix][iy][ip] -= help->v[ix][iy][ip];\n\tmet->w[ix][iy][ip] -= help->w[ix][iy][ip];\n } #pragma omp parallel for default(shared) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "OLATE\", \"METPROC\", NVTX_READ);\n LOG(2, \"Extrapolate meteo data...\");\n\n /* Loop over columns... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Find lowest valid data point... */\n int ip0;\n for (ip0 = met->np - 1; ip0 >= 0; ip0--)\n\tif (!isfinite(met->t[ix][iy][ip0])\n\t || !isfinite(met->u[ix][iy][ip0])\n\t || !isfinite(met->v[ix][iy][ip0])\n\t || !isfinite(met->w[ix][iy][ip0]))\n\t break;\n\n /* Extrapolate... 
*/\n for (int ip = ip0; ip >= 0; ip--) {\n\tmet->t[ix][iy][ip] = met->t[ix][iy][ip + 1];\n\tmet->u[ix][iy][ip] = met->u[ix][iy][ip + 1];\n\tmet->v[ix][iy][ip] = met->v[ix][iy][ip + 1];\n\tmet->w[ix][iy][ip] = met->w[ix][iy][ip + 1];\n\tmet->h2o[ix][iy][ip] = met->h2o[ix][iy][ip + 1];\n\tmet->o3[ix][iy][ip] = met->o3[ix][iy][ip + 1];\n\tmet->lwc[ix][iy][ip] = met->lwc[ix][iy][ip + 1];\n\tmet->rwc[ix][iy][ip] = met->rwc[ix][iy][ip + 1];\n\tmet->iwc[ix][iy][ip] = met->iwc[ix][iy][ip + 1];\n\tmet->swc[ix][iy][ip] = met->swc[ix][iy][ip + 1];\n\tmet->cc[ix][iy][ip] = met->cc[ix][iy][ip + 1];\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ROC\", NVTX_READ);\n LOG(2, \"Calculate geopotential heights...\");\n\n /* Calculate log pressure... */\nfor (int ip = 0; ip < met->np; ip++)\n logp[ip] = log(met->p[ip]);\n\n /* Apply hydrostatic equation to calculate geopotential heights... */\n#pragma omp parallel for default(shared) collapse(2)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Get surface height and pressure... */\n double zs = met->zs[ix][iy];\n double lnps = log(met->ps[ix][iy]);\n\n /* Get temperature and water vapor at the surface... */\n int ip0 = locate_irr(met->p, met->np, met->ps[ix][iy]);\n double ts = LIN(met->p[ip0], met->t[ix][iy][ip0], met->p[ip0 + 1],\n\t\t met->t[ix][iy][ip0 + 1], met->ps[ix][iy]);\n double h2os = LIN(met->p[ip0], met->h2o[ix][iy][ip0], met->p[ip0 + 1],\n\t\t\tmet->h2o[ix][iy][ip0 + 1], met->ps[ix][iy]);\n\n /* Upper part of profile... */\n met->z[ix][iy][ip0 + 1]\n\t= (float) (zs +\n\t\t ZDIFF(lnps, ts, h2os, logp[ip0 + 1],\n\t\t\t met->t[ix][iy][ip0 + 1], met->h2o[ix][iy][ip0 + 1]));\n for (int ip = ip0 + 2; ip < met->np; ip++)\n\tmet->z[ix][iy][ip]\n\t = (float) (met->z[ix][iy][ip - 1] +\n\t\t ZDIFF(logp[ip - 1], met->t[ix][iy][ip - 1],\n\t\t\t met->h2o[ix][iy][ip - 1], logp[ip],\n\t\t\t met->t[ix][iy][ip], met->h2o[ix][iy][ip]));\n\n /* Lower part of profile... */\n met->z[ix][iy][ip0]\n\t= (float) (zs +\n\t\t ZDIFF(lnps, ts, h2os, logp[ip0],\n\t\t\t met->t[ix][iy][ip0], met->h2o[ix][iy][ip0]));\n for (int ip = ip0 - 1; ip >= 0; ip--)\n\tmet->z[ix][iy][ip]\n\t = (float) (met->z[ix][iy][ip + 1] +\n\t\t ZDIFF(logp[ip + 1], met->t[ix][iy][ip + 1],\n\t\t\t met->h2o[ix][iy][ip + 1], logp[ip],\n\t\t\t met->t[ix][iy][ip], met->h2o[ix][iy][ip]));\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "ogp[ip] = log(met->p[ip]);\n\n /* Apply hydrostatic equation to calculate geopotential heights... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Get surface height and pressure... */\n double zs = met->zs[ix][iy];\n double lnps = log(met->ps[ix][iy]);\n\n /* Get temperature and water vapor at the surface... */\n int ip0 = locate_irr(met->p, met->np, met->ps[ix][iy]);\n double ts = LIN(met->p[ip0], met->t[ix][iy][ip0], met->p[ip0 + 1],\n\t\t met->t[ix][iy][ip0 + 1], met->ps[ix][iy]);\n double h2os = LIN(met->p[ip0], met->h2o[ix][iy][ip0], met->p[ip0 + 1],\n\t\t\tmet->h2o[ix][iy][ip0 + 1], met->ps[ix][iy]);\n\n /* Upper part of profile... 
*/\n met->z[ix][iy][ip0 + 1]\n\t= (float) (zs +\n\t\t ZDIFF(lnps, ts, h2os, logp[ip0 + 1],\n\t\t\t met->t[ix][iy][ip0 + 1], met->h2o[ix][iy][ip0 + 1]));\n for (int ip = ip0 + 2; ip < met->np; ip++)\n\tmet->z[ix][iy][ip]\n\t = (float) (met->z[ix][iy][ip - 1] +\n\t\t ZDIFF(logp[ip - 1], met->t[ix][iy][ip - 1],\n\t\t\t met->h2o[ix][iy][ip - 1], logp[ip],\n\t\t\t met->t[ix][iy][ip], met->h2o[ix][iy][ip]));\n\n /* Lower part of profile... */\n met->z[ix][iy][ip0]\n\t= (float) (zs +\n\t\t ZDIFF(lnps, ts, h2os, logp[ip0],\n\t\t\t met->t[ix][iy][ip0], met->h2o[ix][iy][ip0]));\n for (int ip = ip0 - 1; ip >= 0; ip--)\n\tmet->z[ix][iy][ip]\n\t = (float) (met->z[ix][iy][ip + 1] +\n\t\t ZDIFF(logp[ip + 1], met->t[ix][iy][ip + 1],\n\t\t\t met->h2o[ix][iy][ip + 1], logp[ip],\n\t\t\t met->t[ix][iy][ip], met->h2o[ix][iy][ip]));\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": " 6;\n dy = 4;\n }\n }\n\n /* Calculate weights for smoothing... */\n float ws[dx + 1][dy + 1];\nfor (int ix = 0; ix <= dx; ix++)\n for (int iy = 0; iy < dy; iy++)\n ws[ix][iy] = (1.0f - (float) ix / (float) dx)\n\t* (1.0f - (float) iy / (float) dy);\n\n /* Copy data... */\n#pragma omp parallel for default(shared) collapse(3)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n for (int ip = 0; ip < met->np; ip++)\n\thelp[ip][ix][iy] = met->z[ix][iy][ip];\n\n /* Horizontal smoothing... */\n#pragma omp parallel for default(shared) collapse(3)\n for (int ip = 0; ip < met->np; ip++)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\tfloat res = 0, wsum = 0;\n\tint iy0 = MAX(iy - dy + 1, 0);\n\tint iy1 = MIN(iy + dy - 1, met->ny - 1);\n\tfor (int ix2 = ix - dx + 1; ix2 <= ix + dx - 1; ++ix2) {\n\t int ix3 = ix2;\n\t if (ix3 < 0)\n\t ix3 += met->nx;\n\t else if (ix3 >= met->nx)\n\t ix3 -= met->nx;\n\t for (int iy2 = iy0; iy2 <= iy1; ++iy2)\n\t if (isfinite(help[ip][ix3][iy2])) {\n\t float w = ws[abs(ix - ix2)][abs(iy - iy2)];\n\t res += w * help[ip][ix3][iy2];\n\t wsum += w;\n\t }\n\t}\n\tif (wsum > 0)\n\t met->z[ix][iy][ip] = res / wsum;\n\telse\n\t met->z[ix][iy][ip] = NAN;\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(3)", "context_chars": 100, "text": "][iy] = (1.0f - (float) ix / (float) dx)\n\t* (1.0f - (float) iy / (float) dy);\n\n /* Copy data... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n for (int ip = 0; ip < met->np; ip++)\n\thelp[ip][ix][iy] = met->z[ix][iy][ip];\n\n /* Horizontal smoothing... 
*/\n#pragma omp parallel for default(shared) collapse(3)\n for (int ip = 0; ip < met->np; ip++)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\tfloat res = 0, wsum = 0;\n\tint iy0 = MAX(iy - dy + 1, 0);\n\tint iy1 = MIN(iy + dy - 1, met->ny - 1);\n\tfor (int ix2 = ix - dx + 1; ix2 <= ix + dx - 1; ++ix2) {\n\t int ix3 = ix2;\n\t if (ix3 < 0)\n\t ix3 += met->nx;\n\t else if (ix3 >= met->nx)\n\t ix3 -= met->nx;\n\t for (int iy2 = iy0; iy2 <= iy1; ++iy2)\n\t if (isfinite(help[ip][ix3][iy2])) {\n\t float w = ws[abs(ix - ix2)][abs(iy - iy2)];\n\t res += w * help[ip][ix3][iy2];\n\t wsum += w;\n\t }\n\t}\n\tif (wsum > 0)\n\t met->z[ix][iy][ip] = res / wsum;\n\telse\n\t met->z[ix][iy][ip] = NAN;\n } #pragma omp parallel for default(shared) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(3)", "context_chars": 100, "text": "p = 0; ip < met->np; ip++)\n\thelp[ip][ix][iy] = met->z[ix][iy][ip];\n\n /* Horizontal smoothing... */\nfor (int ip = 0; ip < met->np; ip++)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\tfloat res = 0, wsum = 0;\n\tint iy0 = MAX(iy - dy + 1, 0);\n\tint iy1 = MIN(iy + dy - 1, met->ny - 1);\n\tfor (int ix2 = ix - dx + 1; ix2 <= ix + dx - 1; ++ix2) {\n\t int ix3 = ix2;\n\t if (ix3 < 0)\n\t ix3 += met->nx;\n\t else if (ix3 >= met->nx)\n\t ix3 -= met->nx;\n\t for (int iy2 = iy0; iy2 <= iy1; ++iy2)\n\t if (isfinite(help[ip][ix3][iy2])) {\n\t float w = ws[abs(ix - ix2)][abs(iy - iy2)];\n\t res += w * help[ip][ix3][iy2];\n\t wsum += w;\n\t }\n\t}\n\tif (wsum > 0)\n\t met->z[ix][iy][ip] = res / wsum;\n\telse\n\t met->z[ix][iy][ip] = NAN;\n } #pragma omp parallel for default(shared) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": ", \"rh\", \"RH\", NULL, NULL, ctl, met, met->h2o, 0.01f))\n WARN(\"Cannot read relative humidity!\");\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n\tfor (int ip = 0; ip < met->np; ip++) {\n\t double pw = met->h2o[ix][iy][ip] * PSAT(met->t[ix][iy][ip]);\n\t met->h2o[ix][iy][ip] =\n\t (float) (pw / (met->p[ip] - (1.0 - EPS) * pw));\n\t} #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(aux,p) collapse(2)", "context_chars": 100, "text": ";\n LOG(2, \"Interpolate meteo data to pressure levels: %s\", varname);\n\n /* Loop over columns... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Copy pressure profile... */\n for (int ip = 0; ip < met->np; ip++)\n\tp[ip] = met->pl[ix][iy][ip];\n\n /* Interpolate... */\n for (int ip = 0; ip < ctl->met_np; ip++) {\n\tdouble pt = ctl->met_p[ip];\n\tif ((pt > p[0] && p[0] > p[1]) || (pt < p[0] && p[0] < p[1]))\n\t pt = p[0];\n\telse if ((pt > p[met->np - 1] && p[1] > p[0])\n\t\t || (pt < p[met->np - 1] && p[1] < p[0]))\n\t pt = p[met->np - 1];\n\tint ip2 = locate_irr(p, met->np, pt);\n\taux[ip] = LIN(p[ip2], var[ix][iy][ip2],\n\t\t p[ip2 + 1], var[ix][iy][ip2 + 1], pt);\n }\n\n /* Copy data... 
*/\n for (int ip = 0; ip < ctl->met_np; ip++)\n\tvar[ix][iy][ip] = (float) aux[ip];\n } #pragma omp parallel for default(shared) private(aux,p) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": ", NVTX_READ);\n LOG(2, \"Make zeta profiles monotone...\");\n\n /* Create monotone zeta profiles... */\nfor (int i = 0; i < met->nx; i++)\n for (int j = 0; j < met->ny; j++) {\n int k = 1;\n\n while (k < met->npl) {\t/* Check if there is an inversion at level k... */\n\tif ((met->zetal[i][j][k - 1] >= met->zetal[i][j][k])) {\n\t /* Find the upper level k+l over the inversion... */\n\t int l = 0;\n\t do {\n\t l++;\n\t }\n\t while ((met->zetal[i][j][k - 1] >=\n\t\t met->zetal[i][j][k + l]) & (k + l < met->npl));\n\n\t /* Interpolate linear between the top and bottom \n\t of the inversion... */\n\t float s =\n\t (float) (met->zetal[i][j][k + l] - met->zetal[i][j][k - 1])\n\t / (float) (met->hybrid[k + l] - met->hybrid[k - 1]);\n\n\t for (int m = k; m < k + l; m++) {\n\t float d = (float) (met->hybrid[m] - met->hybrid[k - 1]);\n\t met->zetal[i][j][m] = s * d + met->zetal[i][j][k - 1];\n\t }\n\n\t /* Search for more inversions above the last inversion ... */\n\t k = k + l;\n\t} else {\n\t k++;\n\t}\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": ". */\n\t k = k + l;\n\t} else {\n\t k++;\n\t}\n }\n }\n\n /* Create monotone pressure profiles... */\nfor (int i = 0; i < met->nx; i++)\n for (int j = 0; j < met->ny; j++) {\n int k = 1;\n\n while (k < met->npl) {\t/* Check if there is an inversion at level k... */\n\tif ((met->pl[i][j][k - 1] <= met->pl[i][j][k])) {\n\t /* Find the upper level k+l over the inversion... */\n\t int l = 0;\n\t do {\n\t l++;\n\t }\n\t while ((met->pl[i][j][k - 1] <= met->pl[i][j][k + l]) & (k + l <\n\t\t\t\t\t\t\t\t met->npl));\n\n\t /* Interpolate linear between the top and bottom \n\t of the inversion... */\n\t float s = (float) (met->pl[i][j][k + l] - met->pl[i][j][k - 1])\n\t / (float) (met->hybrid[k + l] - met->hybrid[k - 1]);\n\n\t for (int m = k; m < k + l; m++) {\n\t float d = (float) (met->hybrid[m] - met->hybrid[k - 1]);\n\t met->pl[i][j][m] = s * d + met->pl[i][j][k - 1];\n\t }\n\n\t /* Search for more inversions above the last inversion ... */\n\t k = k + l;\n\t} else {\n\t k++;\n\t}\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "SG(\"Meteo data layout not implemented for packed netCDF files!\");\n\n /* Copy and check data... 
*/\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\tif (init)\n\t dest[ix][iy] = 0;\n\tshort aux = help[ARRAY_2D(iy, ix, met->nx)];\n\tif ((fillval == 0 || aux != fillval)\n\t && (missval == 0 || aux != missval)\n\t && fabsf(aux * scalfac + offset) < 1e14f)\n\t dest[ix][iy] += scl * (aux * scalfac + offset);\n\telse\n\t dest[ix][iy] = NAN;\n } #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "... */\n if (ctl->met_convention == 0) {\n\n /* Copy and check data (ordering: lat, lon)... */\nfor (int ix = 0; ix < met->nx; ix++)\n\tfor (int iy = 0; iy < met->ny; iy++) {\n\t if (init)\n\t dest[ix][iy] = 0;\n\t float aux = help[ARRAY_2D(iy, ix, met->nx)];\n\t if ((fillval == 0 || aux != fillval)\n\t && (missval == 0 || aux != missval)\n\t && fabsf(aux) < 1e14f)\n\t dest[ix][iy] += scl * aux;\n\t else\n\t dest[ix][iy] = NAN;\n\t} #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "\n\t dest[ix][iy] = NAN;\n\t}\n\n } else {\n\n /* Copy and check data (ordering: lon, lat)... */\nfor (int iy = 0; iy < met->ny; iy++)\n\tfor (int ix = 0; ix < met->nx; ix++) {\n\t if (init)\n\t dest[ix][iy] = 0;\n\t float aux = help[ARRAY_2D(ix, iy, met->ny)];\n\t if ((fillval == 0 || aux != fillval)\n\t && (missval == 0 || aux != missval)\n\t && fabsf(aux) < 1e14f)\n\t dest[ix][iy] += scl * aux;\n\t else\n\t dest[ix][iy] = NAN;\n\t} #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "SG(\"Meteo data layout not implemented for packed netCDF files!\");\n\n /* Copy and check data... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n\tfor (int ip = 0; ip < met->np; ip++) {\n\t short aux = help[ARRAY_3D(ip, iy, met->ny, ix, met->nx)];\n\t if ((fillval == 0 || aux != fillval)\n\t && (missval == 0 || aux != missval)\n\t && fabsf(aux * scalfac + offset) < 1e14f)\n\t dest[ix][iy][ip] = scl * (aux * scalfac + offset);\n\t else\n\t dest[ix][iy][ip] = NAN;\n\t} #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "/\n if (ctl->met_convention == 0) {\n\n /* Copy and check data (ordering: lev, lat, lon)... 
*/\nfor (int ix = 0; ix < met->nx; ix++)\n\tfor (int iy = 0; iy < met->ny; iy++)\n\t for (int ip = 0; ip < met->np; ip++) {\n\t float aux = help[ARRAY_3D(ip, iy, met->ny, ix, met->nx)];\n\t if ((fillval == 0 || aux != fillval)\n\t\t&& (missval == 0 || aux != missval)\n\t\t&& fabsf(aux) < 1e14f)\n\t dest[ix][iy][ip] = scl * aux;\n\t else\n\t dest[ix][iy][ip] = NAN;\n\t } #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) num_threads(12)", "context_chars": 100, "text": "ix][iy][ip] = NAN;\n\t }\n\n } else {\n\n /* Copy and check data (ordering: lon, lat, lev)... */\nfor (int ip = 0; ip < met->np; ip++)\n\tfor (int iy = 0; iy < met->ny; iy++)\n\t for (int ix = 0; ix < met->nx; ix++) {\n\t float aux = help[ARRAY_3D(ix, iy, met->ny, ip, met->np)];\n\t if ((fillval == 0 || aux != fillval)\n\t\t&& (missval == 0 || aux != missval)\n\t\t&& fabsf(aux) < 1e14f)\n\t dest[ix][iy][ip] = scl * aux;\n\t else\n\t dest[ix][iy][ip] = NAN;\n\t } #pragma omp parallel for default(shared) num_threads(12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "012)... */\n const double rib_crit = 0.25, dz = 0.05, umin = 5.0;\n\n /* Loop over grid points... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Set bottom level of PBL... */\n double pbl_bot = met->ps[ix][iy] + DZ2DP(dz, met->ps[ix][iy]);\n\n /* Find lowest level near the bottom... */\n int ip;\n for (ip = 1; ip < met->np; ip++)\n\tif (met->p[ip] < pbl_bot)\n\t break;\n\n /* Get near surface data... */\n double zs = LIN(met->p[ip - 1], met->z[ix][iy][ip - 1],\n\t\t met->p[ip], met->z[ix][iy][ip], pbl_bot);\n double ts = LIN(met->p[ip - 1], met->t[ix][iy][ip - 1],\n\t\t met->p[ip], met->t[ix][iy][ip], pbl_bot);\n double us = LIN(met->p[ip - 1], met->u[ix][iy][ip - 1],\n\t\t met->p[ip], met->u[ix][iy][ip], pbl_bot);\n double vs = LIN(met->p[ip - 1], met->v[ix][iy][ip - 1],\n\t\t met->p[ip], met->v[ix][iy][ip], pbl_bot);\n double h2os = LIN(met->p[ip - 1], met->h2o[ix][iy][ip - 1],\n\t\t\tmet->p[ip], met->h2o[ix][iy][ip], pbl_bot);\n double tvs = THETAVIRT(pbl_bot, ts, h2os);\n\n /* Init... */\n double rib_old = 0;\n\n /* Loop over levels... */\n for (; ip < met->np; ip++) {\n\n\t/* Get squared horizontal wind speed... */\n\tdouble vh2\n\t = SQR(met->u[ix][iy][ip] - us) + SQR(met->v[ix][iy][ip] - vs);\n\tvh2 = MAX(vh2, SQR(umin));\n\n\t/* Calculate bulk Richardson number... */\n\tdouble rib = G0 * 1e3 * (met->z[ix][iy][ip] - zs) / tvs\n\t * (THETAVIRT(met->p[ip], met->t[ix][iy][ip],\n\t\t met->h2o[ix][iy][ip]) - tvs) / vh2;\n\n\t/* Check for critical value... */\n\tif (rib >= rib_crit) {\n\t met->pbl[ix][iy] = (float) (LIN(rib_old, met->p[ip - 1],\n\t\t\t\t\t rib, met->p[ip], rib_crit));\n\t if (met->pbl[ix][iy] > pbl_bot)\n\t met->pbl[ix][iy] = (float) pbl_bot;\n\t break;\n\t}\n\n\t/* Save Richardson number... */\n\trib_old = rib;\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "->lon[met->nx - 2] + met->lon[1] - met->lon[0];\n\n /* Loop over latitudes and pressure levels... 
*/\nfor (int iy = 0; iy < met->ny; iy++) {\n met->ps[met->nx - 1][iy] = met->ps[0][iy];\n met->zs[met->nx - 1][iy] = met->zs[0][iy];\n met->ts[met->nx - 1][iy] = met->ts[0][iy];\n met->us[met->nx - 1][iy] = met->us[0][iy];\n met->vs[met->nx - 1][iy] = met->vs[0][iy];\n met->lsm[met->nx - 1][iy] = met->lsm[0][iy];\n met->sst[met->nx - 1][iy] = met->sst[0][iy];\n for (int ip = 0; ip < met->np; ip++) {\n met->t[met->nx - 1][iy][ip] = met->t[0][iy][ip];\n met->u[met->nx - 1][iy][ip] = met->u[0][iy][ip];\n met->v[met->nx - 1][iy][ip] = met->v[0][iy][ip];\n met->w[met->nx - 1][iy][ip] = met->w[0][iy][ip];\n met->h2o[met->nx - 1][iy][ip] = met->h2o[0][iy][ip];\n met->o3[met->nx - 1][iy][ip] = met->o3[0][iy][ip];\n met->lwc[met->nx - 1][iy][ip] = met->lwc[0][iy][ip];\n met->rwc[met->nx - 1][iy][ip] = met->rwc[0][iy][ip];\n met->iwc[met->nx - 1][iy][ip] = met->iwc[0][iy][ip];\n met->swc[met->nx - 1][iy][ip] = met->swc[0][iy][ip];\n met->cc[met->nx - 1][iy][ip] = met->cc[0][iy][ip];\n }\n for (int ip = 0; ip < met->npl; ip++) {\n met->ul[met->nx - 1][iy][ip] = met->ul[0][iy][ip];\n met->vl[met->nx - 1][iy][ip] = met->vl[0][iy][ip];\n met->wl[met->nx - 1][iy][ip] = met->wl[0][iy][ip];\n met->pl[met->nx - 1][iy][ip] = met->pl[0][iy][ip];\n met->zetal[met->nx - 1][iy][ip] = met->zetal[0][iy][ip];\n met->zeta_dotl[met->nx - 1][iy][ip] = met->zeta_dotl[0][iy][ip];\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "0)\n sign = -1;\n\n /* Look-up table of cosinus and sinus... */\n double clon[EX], slon[EX];\nfor (int ix = 0; ix < met->nx; ix++) {\n clon[ix] = cos(sign * met->lon[ix] / 180. * M_PI);\n slon[ix] = sin(sign * met->lon[ix] / 180. * M_PI);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "_PI);\n slon[ix] = sin(sign * met->lon[ix] / 180. * M_PI);\n }\n\n /* Loop over levels... */\nfor (int ip = 0; ip < met->np; ip++) {\n\n /* Transform 89 degree u and v winds into Cartesian coordinates and take the mean... */\n double vel89x = 0, vel89y = 0;\n for (int ix = 0; ix < met->nx; ix++) {\n\tvel89x +=\n\t (met->u[ix][i89][ip] * clon[ix] -\n\t met->v[ix][i89][ip] * slon[ix]) / met->nx;\n\tvel89y +=\n\t (met->u[ix][i89][ip] * slon[ix] +\n\t met->v[ix][i89][ip] * clon[ix]) / met->nx;\n }\n\n /* Replace 90 degree winds by 89 degree mean... */\n for (int ix = 0; ix < met->nx; ix++) {\n\tmet->u[ix][i90][ip]\n\t = (float) (vel89x * clon[ix] + vel89y * slon[ix]);\n\tmet->v[ix][i90][ip]\n\t = (float) (-vel89x * slon[ix] + vel89y * clon[ix]);\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ET_PV\", \"METPROC\", NVTX_READ);\n LOG(2, \"Calculate potential vorticity...\");\n\n /* Set powers... */\nfor (int ip = 0; ip < met->np; ip++)\n pows[ip] = pow(1000. / met->p[ip], 0.286);\n\n /* Loop over grid points... */\n#pragma omp parallel for default(shared)\n for (int ix = 0; ix < met->nx; ix++) {\n\n /* Set indices... */\n int ix0 = MAX(ix - 1, 0);\n int ix1 = MIN(ix + 1, met->nx - 1);\n\n /* Loop over grid points... 
*/\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Set indices... */\n int iy0 = MAX(iy - 1, 0);\n int iy1 = MIN(iy + 1, met->ny - 1);\n\n /* Set auxiliary variables... */\n double latr = 0.5 * (met->lat[iy1] + met->lat[iy0]);\n double dx = 1000. * DEG2DX(met->lon[ix1] - met->lon[ix0], latr);\n double dy = 1000. * DEG2DY(met->lat[iy1] - met->lat[iy0]);\n double c0 = cos(met->lat[iy0] / 180. * M_PI);\n double c1 = cos(met->lat[iy1] / 180. * M_PI);\n double cr = cos(latr / 180. * M_PI);\n double vort = 2 * 7.2921e-5 * sin(latr * M_PI / 180.);\n\n /* Loop over grid points... */\n for (int ip = 0; ip < met->np; ip++) {\n\n\t/* Get gradients in longitude... */\n\tdouble dtdx\n\t = (met->t[ix1][iy][ip] - met->t[ix0][iy][ip]) * pows[ip] / dx;\n\tdouble dvdx = (met->v[ix1][iy][ip] - met->v[ix0][iy][ip]) / dx;\n\n\t/* Get gradients in latitude... */\n\tdouble dtdy\n\t = (met->t[ix][iy1][ip] - met->t[ix][iy0][ip]) * pows[ip] / dy;\n\tdouble dudy\n\t = (met->u[ix][iy1][ip] * c1 - met->u[ix][iy0][ip] * c0) / dy;\n\n\t/* Set indices... */\n\tint ip0 = MAX(ip - 1, 0);\n\tint ip1 = MIN(ip + 1, met->np - 1);\n\n\t/* Get gradients in pressure... */\n\tdouble dtdp, dudp, dvdp;\n\tdouble dp0 = 100. * (met->p[ip] - met->p[ip0]);\n\tdouble dp1 = 100. * (met->p[ip1] - met->p[ip]);\n\tif (ip != ip0 && ip != ip1) {\n\t double denom = dp0 * dp1 * (dp0 + dp1);\n\t dtdp = (dp0 * dp0 * met->t[ix][iy][ip1] * pows[ip1]\n\t\t - dp1 * dp1 * met->t[ix][iy][ip0] * pows[ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->t[ix][iy][ip] * pows[ip])\n\t / denom;\n\t dudp = (dp0 * dp0 * met->u[ix][iy][ip1]\n\t\t - dp1 * dp1 * met->u[ix][iy][ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->u[ix][iy][ip])\n\t / denom;\n\t dvdp = (dp0 * dp0 * met->v[ix][iy][ip1]\n\t\t - dp1 * dp1 * met->v[ix][iy][ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->v[ix][iy][ip])\n\t / denom;\n\t} else {\n\t double denom = dp0 + dp1;\n\t dtdp =\n\t (met->t[ix][iy][ip1] * pows[ip1] -\n\t met->t[ix][iy][ip0] * pows[ip0]) / denom;\n\t dudp = (met->u[ix][iy][ip1] - met->u[ix][iy][ip0]) / denom;\n\t dvdp = (met->v[ix][iy][ip1] - met->v[ix][iy][ip0]) / denom;\n\t}\n\n\t/* Calculate PV... */\n\tmet->pv[ix][iy][ip] = (float)\n\t (1e6 * G0 *\n\t (-dtdp * (dvdx - dudy / cr + vort) + dvdp * dtdx - dudp * dtdy));\n }\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "p < met->np; ip++)\n pows[ip] = pow(1000. / met->p[ip], 0.286);\n\n /* Loop over grid points... */\nfor (int ix = 0; ix < met->nx; ix++) {\n\n /* Set indices... */\n int ix0 = MAX(ix - 1, 0);\n int ix1 = MIN(ix + 1, met->nx - 1);\n\n /* Loop over grid points... */\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Set indices... */\n int iy0 = MAX(iy - 1, 0);\n int iy1 = MIN(iy + 1, met->ny - 1);\n\n /* Set auxiliary variables... */\n double latr = 0.5 * (met->lat[iy1] + met->lat[iy0]);\n double dx = 1000. * DEG2DX(met->lon[ix1] - met->lon[ix0], latr);\n double dy = 1000. * DEG2DY(met->lat[iy1] - met->lat[iy0]);\n double c0 = cos(met->lat[iy0] / 180. * M_PI);\n double c1 = cos(met->lat[iy1] / 180. * M_PI);\n double cr = cos(latr / 180. * M_PI);\n double vort = 2 * 7.2921e-5 * sin(latr * M_PI / 180.);\n\n /* Loop over grid points... */\n for (int ip = 0; ip < met->np; ip++) {\n\n\t/* Get gradients in longitude... 
*/\n\tdouble dtdx\n\t = (met->t[ix1][iy][ip] - met->t[ix0][iy][ip]) * pows[ip] / dx;\n\tdouble dvdx = (met->v[ix1][iy][ip] - met->v[ix0][iy][ip]) / dx;\n\n\t/* Get gradients in latitude... */\n\tdouble dtdy\n\t = (met->t[ix][iy1][ip] - met->t[ix][iy0][ip]) * pows[ip] / dy;\n\tdouble dudy\n\t = (met->u[ix][iy1][ip] * c1 - met->u[ix][iy0][ip] * c0) / dy;\n\n\t/* Set indices... */\n\tint ip0 = MAX(ip - 1, 0);\n\tint ip1 = MIN(ip + 1, met->np - 1);\n\n\t/* Get gradients in pressure... */\n\tdouble dtdp, dudp, dvdp;\n\tdouble dp0 = 100. * (met->p[ip] - met->p[ip0]);\n\tdouble dp1 = 100. * (met->p[ip1] - met->p[ip]);\n\tif (ip != ip0 && ip != ip1) {\n\t double denom = dp0 * dp1 * (dp0 + dp1);\n\t dtdp = (dp0 * dp0 * met->t[ix][iy][ip1] * pows[ip1]\n\t\t - dp1 * dp1 * met->t[ix][iy][ip0] * pows[ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->t[ix][iy][ip] * pows[ip])\n\t / denom;\n\t dudp = (dp0 * dp0 * met->u[ix][iy][ip1]\n\t\t - dp1 * dp1 * met->u[ix][iy][ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->u[ix][iy][ip])\n\t / denom;\n\t dvdp = (dp0 * dp0 * met->v[ix][iy][ip1]\n\t\t - dp1 * dp1 * met->v[ix][iy][ip0]\n\t\t + (dp1 * dp1 - dp0 * dp0) * met->v[ix][iy][ip])\n\t / denom;\n\t} else {\n\t double denom = dp0 + dp1;\n\t dtdp =\n\t (met->t[ix][iy][ip1] * pows[ip1] -\n\t met->t[ix][iy][ip0] * pows[ip0]) / denom;\n\t dudp = (met->u[ix][iy][ip1] - met->u[ix][iy][ip0]) / denom;\n\t dvdp = (met->v[ix][iy][ip1] - met->v[ix][iy][ip0]) / denom;\n\t}\n\n\t/* Calculate PV... */\n\tmet->pv[ix][iy][ip] = (float)\n\t (1e6 * G0 *\n\t (-dtdp * (dvdx - dudy / cr + vort) + dvdp * dtdx - dudp * dtdy));\n }\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "udy / cr + vort) + dvdp * dtdx - dudp * dtdy));\n }\n }\n }\n\n /* Fix for polar regions... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int ip = 0; ip < met->np; ip++) {\n met->pv[ix][0][ip]\n\t= met->pv[ix][1][ip]\n\t= met->pv[ix][2][ip];\n met->pv[ix][met->ny - 1][ip]\n\t= met->pv[ix][met->ny - 2][ip]\n\t= met->pv[ix][met->ny - 3][ip];\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": ", \"METPROC\", NVTX_READ);\n LOG(2, \"Calculate total column ozone...\");\n\n /* Loop over columns... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n /* Integrate... */\n double cd = 0;\n for (int ip = 1; ip < met->np; ip++)\n\tif (met->p[ip - 1] <= met->ps[ix][iy]) {\n\t double vmr = 0.5 * (met->o3[ix][iy][ip - 1] + met->o3[ix][iy][ip]);\n\t double dp = met->p[ip - 1] - met->p[ip];\n\t cd += vmr * MO3 / MA * dp * 1e2 / G0;\n\t}\n\n /* Convert to Dobson units... */\n met->o3c[ix][iy] = (float) (cd / 2.1415e-5);\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "C\", NVTX_READ);\n LOG(2, \"Calculate tropopause...\");\n\n /* Get altitude and pressure profiles... 
*/\nfor (int iz = 0; iz < met->np; iz++)\n z[iz] = Z(met->p[iz]);\n#pragma omp parallel for default(shared)\n for (int iz = 0; iz <= 190; iz++) {\n z2[iz] = 4.5 + 0.1 * iz;\n p2[iz] = P(z2[iz]);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": " omp parallel for default(shared)\n for (int iz = 0; iz < met->np; iz++)\n z[iz] = Z(met->p[iz]);\nfor (int iz = 0; iz <= 190; iz++) {\n z2[iz] = 4.5 + 0.1 * iz;\n p2[iz] = P(z2[iz]);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": " iz;\n p2[iz] = P(z2[iz]);\n }\n\n /* Do not calculate tropopause... */\n if (ctl->met_tropo == 0)\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n\tmet->pt[ix][iy] = NAN;\n\n /* Use tropopause climatology... */\n else if (ctl->met_tropo == 1) {\n#pragma omp parallel for default(shared) collapse(2)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n\tmet->pt[ix][iy] = (float) clim_tropo(clim, met->time, met->lat[iy]);\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": "+)\n\tmet->pt[ix][iy] = NAN;\n\n /* Use tropopause climatology... */\n else if (ctl->met_tropo == 1) {\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n\tmet->pt[ix][iy] = (float) clim_tropo(clim, met->time, met->lat[iy]);\n }\n\n /* Use cold point... */\n else if (ctl->met_tropo == 2) {\n\n /* Loop over grid points... */\n#pragma omp parallel for default(shared) private(t,t2) collapse(2)\n for (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n\t/* Interpolate temperature profile... */\n\tfor (int iz = 0; iz < met->np; iz++)\n\t t[iz] = met->t[ix][iy][iz];\n\tspline(z, t, met->np, z2, t2, 171, ctl->met_tropo_spline);\n\n\t/* Find minimum... */\n\tint iz = (int) gsl_stats_min_index(t2, 1, 171);\n\tif (iz > 0 && iz < 170)\n\t met->pt[ix][iy] = (float) p2[iz];\n\telse\n\t met->pt[ix][iy] = NAN;\n }\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(t,t2) collapse(2)", "context_chars": 100, "text": " }\n\n /* Use cold point... */\n else if (ctl->met_tropo == 2) {\n\n /* Loop over grid points... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n\t/* Interpolate temperature profile... */\n\tfor (int iz = 0; iz < met->np; iz++)\n\t t[iz] = met->t[ix][iy][iz];\n\tspline(z, t, met->np, z2, t2, 171, ctl->met_tropo_spline);\n\n\t/* Find minimum... 
*/\n\tint iz = (int) gsl_stats_min_index(t2, 1, 171);\n\tif (iz > 0 && iz < 170)\n\t met->pt[ix][iy] = (float) p2[iz];\n\telse\n\t met->pt[ix][iy] = NAN;\n } #pragma omp parallel for default(shared) private(t,t2) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(t,t2) collapse(2)", "context_chars": 100, "text": "... */\n else if (ctl->met_tropo == 3 || ctl->met_tropo == 4) {\n\n /* Loop over grid points... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n\t/* Interpolate temperature profile... */\n\tint iz;\n\tfor (iz = 0; iz < met->np; iz++)\n\t t[iz] = met->t[ix][iy][iz];\n\tspline(z, t, met->np, z2, t2, 191, ctl->met_tropo_spline);\n\n\t/* Find 1st tropopause... */\n\tmet->pt[ix][iy] = NAN;\n\tfor (iz = 0; iz <= 170; iz++) {\n\t int found = 1;\n\t for (int iz2 = iz + 1; iz2 <= iz + 20; iz2++)\n\t if (LAPSE(p2[iz], t2[iz], p2[iz2], t2[iz2]) > 2.0) {\n\t found = 0;\n\t break;\n\t }\n\t if (found) {\n\t if (iz > 0 && iz < 170)\n\t met->pt[ix][iy] = (float) p2[iz];\n\t break;\n\t }\n\t}\n\n\t/* Find 2nd tropopause... */\n\tif (ctl->met_tropo == 4) {\n\t met->pt[ix][iy] = NAN;\n\t for (; iz <= 170; iz++) {\n\t int found = 1;\n\t for (int iz2 = iz + 1; iz2 <= iz + 10; iz2++)\n\t if (LAPSE(p2[iz], t2[iz], p2[iz2], t2[iz2]) < 3.0) {\n\t\tfound = 0;\n\t\tbreak;\n\t }\n\t if (found)\n\t break;\n\t }\n\t for (; iz <= 170; iz++) {\n\t int found = 1;\n\t for (int iz2 = iz + 1; iz2 <= iz + 20; iz2++)\n\t if (LAPSE(p2[iz], t2[iz], p2[iz2], t2[iz2]) > 2.0) {\n\t\tfound = 0;\n\t\tbreak;\n\t }\n\t if (found) {\n\t if (iz > 0 && iz < 170)\n\t\tmet->pt[ix][iy] = (float) p2[iz];\n\t break;\n\t }\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(t,t2) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(pv,pv2,th,th2) collapse(2)", "context_chars": 100, "text": "se dynamical tropopause... */\n else if (ctl->met_tropo == 5) {\n\n /* Loop over grid points... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n\n\t/* Interpolate potential vorticity profile... */\n\tfor (int iz = 0; iz < met->np; iz++)\n\t pv[iz] = met->pv[ix][iy][iz];\n\tspline(z, pv, met->np, z2, pv2, 171, ctl->met_tropo_spline);\n\n\t/* Interpolate potential temperature profile... */\n\tfor (int iz = 0; iz < met->np; iz++)\n\t th[iz] = THETA(met->p[iz], met->t[ix][iy][iz]);\n\tspline(z, th, met->np, z2, th2, 171, ctl->met_tropo_spline);\n\n\t/* Find dynamical tropopause... */\n\tmet->pt[ix][iy] = NAN;\n\tfor (int iz = 0; iz <= 170; iz++)\n\t if (fabs(pv2[iz]) >= ctl->met_tropo_pv\n\t || th2[iz] >= ctl->met_tropo_theta) {\n\t if (iz > 0 && iz < 170)\n\t met->pt[ix][iy] = (float) p2[iz];\n\t break;\n\t }\n } #pragma omp parallel for default(shared) private(pv,pv2,th,th2) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": " calculate tropopause!\");\n\n /* Interpolate temperature, geopotential height, and water vapor... 
*/\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++) {\n double h2ot, tt, zt;\n INTPOL_INIT;\n intpol_met_space_3d(met, met->t, met->pt[ix][iy], met->lon[ix],\n\t\t\t met->lat[iy], &tt, ci, cw, 1);\n intpol_met_space_3d(met, met->z, met->pt[ix][iy], met->lon[ix],\n\t\t\t met->lat[iy], &zt, ci, cw, 0);\n intpol_met_space_3d(met, met->h2o, met->pt[ix][iy], met->lon[ix],\n\t\t\t met->lat[iy], &h2ot, ci, cw, 0);\n met->tt[ix][iy] = (float) tt;\n met->zt[ix][iy] = (float) zt;\n met->h2ot[ix][iy] = (float) h2ot;\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ouble dlat = (ctl->grid_lat1 - ctl->grid_lat0) / ctl->grid_ny;\n\n /* Set vertical coordinates... */\nfor (int iz = 0; iz < ctl->grid_nz; iz++) {\n z[iz] = ctl->grid_z0 + dz * (iz + 0.5);\n press[iz] = P(z[iz]);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": ".. */\n for (int ix = 0; ix < ctl->grid_nx; ix++)\n lon[ix] = ctl->grid_lon0 + dlon * (ix + 0.5);\nfor (int iy = 0; iy < ctl->grid_ny; iy++) {\n lat[iy] = ctl->grid_lat0 + dlat * (iy + 0.5);\n area[iy] = dlat * dlon * SQR(RE * M_PI / 180.)\n * cos(lat[iy] * M_PI / 180.);\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "e t0 = t - 0.5 * ctl->dt_mod;\n double t1 = t + 0.5 * ctl->dt_mod;\n\n /* Get grid box indices... */\nfor (int ip = 0; ip < atm->np; ip++) {\n ixs[ip] = (int) ((atm->lon[ip] - ctl->grid_lon0) / dlon);\n iys[ip] = (int) ((atm->lat[ip] - ctl->grid_lat0) / dlat);\n izs[ip] = (int) ((Z(atm->p[ip]) - ctl->grid_z0) / dz);\n if (atm->time[ip] < t0 || atm->time[ip] > t1\n\t|| ixs[ip] < 0 || ixs[ip] >= ctl->grid_nx\n\t|| iys[ip] < 0 || iys[ip] >= ctl->grid_ny\n\t|| izs[ip] < 0 || izs[ip] >= ctl->grid_nz)\n izs[ip] = -1;\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "ernel * atm->q[iq][ip]);\n }\n }\n\n /* Calculate column density and volume mixing ratio... */\nfor (int ix = 0; ix < ctl->grid_nx; ix++)\n for (int iy = 0; iy < ctl->grid_ny; iy++)\n for (int iz = 0; iz < ctl->grid_nz; iz++) {\n\n\t/* Get grid index... */\n\tint idx = ARRAY_3D(ix, iy, ctl->grid_ny, iz, ctl->grid_nz);\n\n\t/* Calculate column density... */\n\tcd[idx] = NAN;\n\tif (ctl->qnt_m >= 0)\n\t cd[idx] = mean[ctl->qnt_m][idx] / (1e6 * area[iy]);\n\n\t/* Calculate volume mixing ratio (implicit)... */\n\tvmr_impl[idx] = NAN;\n\tif (ctl->qnt_m >= 0 && ctl->molmass > 0 && met0 != NULL\n\t && met1 != NULL) {\n\t vmr_impl[idx] = 0;\n\t if (mean[ctl->qnt_m][idx] > 0) {\n\n\t /* Get temperature... */\n\t double temp;\n\t INTPOL_INIT;\n\t intpol_met_time_3d(met0, met0->t, met1, met1->t, t, press[iz],\n\t\t\t lon[ix], lat[iy], &temp, ci, cw, 1);\n\n\t /* Calculate volume mixing ratio... 
*/\n\t vmr_impl[idx] = MA / ctl->molmass * mean[ctl->qnt_m][idx]\n\t / (RHO(press[iz], temp) * 1e6 * area[iy] * 1e3 * dz);\n\t }\n\t}\n\n\t/* Calculate mean... */\n\tif (np[idx] > 0)\n\t for (int iq = 0; iq < ctl->nq; iq++) {\n\t mean[iq][idx] /= np[idx];\n\t double var = sigma[iq][idx] / np[idx] - SQR(mean[iq][idx]);\n\t sigma[iq][idx] = (var > 0 ? sqrt(var) : 0);\n\t} else\n\t for (int iq = 0; iq < ctl->nq; iq++) {\n\t mean[iq][idx] = NAN;\n\t sigma[iq][idx] = NAN;\n\t }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) collapse(2)", "context_chars": 100, "text": ") {\n\n float *help;\n\n /* Allocate... */\n ALLOC(help, float,\n\tEX * EY * EP);\n\n /* Copy data... */\nfor (int ix = 0; ix < met->nx; ix++)\n for (int iy = 0; iy < met->ny; iy++)\n for (int ip = 0; ip < met->np; ip++)\n\thelp[ARRAY_3D(ix, iy, met->ny, ip, met->np)] = var[ix][iy][ip];\n\n /* Write uncompressed data... */\n if (ctl->met_type == 1) {\n LOG(2, \"Write 3-D variable: %s (uncompressed)\", varname);\n FWRITE(help, float,\n\t (size_t) (met->nx * met->ny * met->np),\n\t out);\n } #pragma omp parallel for default(shared) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/slcs-jsc/mptrac/src/mptrac.c", "omp_pragma_line": "#pragma omp parallel for default(shared) reduction(+:mass,np)", "context_chars": 100, "text": "le_dz);\n\n /* Init... */\n double mass = 0;\n int np = 0;\n\n /* Loop over air parcels... */\nfor (int ip = 0; ip < atm->np; ip++) {\n\n /* Check time... */\n if (atm->time[ip] < t0 || atm->time[ip] > t1)\n\tcontinue;\n\n /* Check latitude... */\n if (fabs(rlat[i] - atm->lat[ip]) > dlat)\n\tcontinue;\n\n /* Check horizontal distance... */\n double x1[3];\n geo2cart(0, atm->lon[ip], atm->lat[ip], x1);\n if (DIST2(x0, x1) > rmax2)\n\tcontinue;\n\n /* Check pressure... */\n if (ctl->sample_dz > 0)\n\tif (atm->p[ip] > pbot || atm->p[ip] < ptop)\n\t continue;\n\n /* Add mass... 
*/\n if (ctl->qnt_m >= 0)\n\tmass +=\n\t kernel_weight(kz, kw, nk, atm->p[ip]) * atm->q[ctl->qnt_m][ip];\n np++;\n } #pragma omp parallel for default(shared) reduction(+:mass,np)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_tag_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nd_data[i] = 0xdeadbeefdeadbeef + 10 * i;\n recv_data[i] = 0;\n }\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n ucs_status_t status;\n int worker_index = 0;\n\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT) {\n worker_index = i;\n }\n\n send_b(&(send_data[i]), sizeof(send_data[i]), DATATYPE, 0x111337+i,\n NULL, i);\n\n short_progress_loop(worker_index); /* Receive messages as unexpected */\n\n status = recv_b(&(recv_data[i]), sizeof(recv_data[i]), DATATYPE, 0x1337+i,\n 0xffff, &(info[i]), NULL, i);\n ASSERT_UCS_OK(status);\n\n EXPECT_EQ(sizeof(send_data[i]), info[i].length);\n EXPECT_EQ((ucp_tag_t)(0x111337+i), info[i].sender_tag);\n EXPECT_EQ(send_data[i], recv_data[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "> rkey;\n rkey.resize(num_threads);\n\n /* test parallel rkey unpack */\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n int worker_index = 0;\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT) {\n worker_index = i;\n }\n ucs_status_t status = ucp_ep_rkey_unpack(sender().ep(worker_index),\n rkey_buffer, &rkey[i]);\n ASSERT_UCS_OK(status);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_data[i] = 0xdeadbeefdeadbeef + 10 * i;\n target_data[i] = 0;\n }\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n int worker_index = 0;\n\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT) {\n worker_index = i;\n }\n\n void* req = ucp_put_nb(sender().ep(worker_index), &orig_data[i],\n sizeof(uint64_t), (uintptr_t)((uint64_t*)memheap + i),\n rkey[i], send_cb);\n request_wait(req, {}, worker_index);\n\n flush_worker(sender(), worker_index);\n\n EXPECT_EQ(orig_data[i], target_data[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_data[i] = 0xdeadbeefdeadbeef + 10 * i;\n target_data[i] = 0;\n }\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n ucs_status_t status;\n int worker_index = 0;\n\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT)\n worker_index = i;\n\n status = ucp_put_nbi(sender().ep(worker_index), &orig_data[i], sizeof(uint64_t),\n (uintptr_t)((uint64_t*)memheap + i), rkey[i]);\n ASSERT_UCS_OK_OR_INPROGRESS(status);\n\n flush_worker(sender(), worker_index);\n\n EXPECT_EQ(orig_data[i], target_data[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_data[i] = 0;\n target_data[i] = 0xdeadbeefdeadbeef + 10 * i;\n }\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) 
{\n int worker_index = 0;\n\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT) {\n worker_index = i;\n }\n\n void *req = ucp_get_nb(sender().ep(worker_index), &orig_data[i],\n sizeof(uint64_t), (uintptr_t)((uint64_t*)memheap + i),\n rkey[i], send_cb);\n request_wait(req, {}, worker_index);\n\n flush_worker(sender(), worker_index);\n\n EXPECT_EQ(orig_data[i], target_data[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_data[i] = 0;\n target_data[i] = 0xdeadbeefdeadbeef + 10 * i;\n }\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n ucs_status_t status;\n int worker_index = 0;\n\n if (get_variant_thread_type() == MULTI_THREAD_CONTEXT) {\n worker_index = i;\n }\n\n status = ucp_get_nbi(sender().ep(worker_index), &orig_data[i], sizeof(uint64_t),\n (uintptr_t)((uint64_t *)memheap + i), rkey[i]);\n ASSERT_UCS_OK_OR_INPROGRESS(status);\n\n flush_worker(sender(), worker_index);\n\n EXPECT_EQ(orig_data[i], target_data[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openucx/ucx/test/gtest/ucp/test_ucp_rma_mt.cc", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "er_index);\n\n EXPECT_EQ(orig_data[i], target_data[i]);\n }\n#endif\n\n#if _OPENMP && ENABLE_MT\nfor (int i = 0; i < num_threads; i++) {\n ucp_rkey_destroy(rkey[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rintf (\"Number of Threads counted = %i\\n\",k);\n#endif\n\n /* Get initial value for system clock. 
*/\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rity appears to be \"\n\t \"less than one microsecond.\\n\");\n\tquantum = 1;\n }\n\n t = mysecond();\nfor (j = 0; j < STREAM_ARRAY_SIZE; j++)\n\t\ta[j] = 2.0E0 * a[j];\n t = 1.0E6 * (mysecond() - t);\n\n printf(\"Each test below will take on the order\"\n\t\" of %d microseconds.\\n\", (int) t );\n printf(\" (= %d clock ticks)\\n\", (int) (t/quantum) );\n printf(\"Increase the size of the arrays if this shows that\\n\");\n printf(\"you are not getting at least 20 clock ticks per test.\\n\");\n\n printf(HLINE);\n\n printf(\"WARNING -- The above is only a rough guideline.\\n\");\n printf(\"For best results, please be sure you know the\\n\");\n printf(\"precision of your system timer.\\n\");\n printf(HLINE);\n \n /*\t--- MAIN LOOP --- repeat test cases NTIMES times --- */\n\n scalar = 3.0;\n for (k=0; k #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (k=0; kfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[0][k];\n\t\n\ttimes[1][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Scale(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "second() - times[1][k];\n\t\n\ttimes[2][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Add();\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/STREAM/stream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " - times[2][k];\n\t\n\ttimes[3][k] = mysecond();\n#ifdef TUNED\n tuned_STREAM_Triad(scalar);\n#else\nfor (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "anspose of mat_a\nvoid transpose_matrix(double** mat_a, int rows_a, int cols_a, double** mat_c) {\n\n for (int i = 0; i < rows_a; i++) {\n for (int j = 0; j < cols_a; j++) {\n mat_c[j][i] = mat_a[i][j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/linear_algebra_separate.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e of mat_a\n// void transpose_matrix(double** mat_a, int rows_a, int cols_a, double** mat_c) {\n\n// // for (int i = 0; i < rows_a; i++) {\n// for (int j = 0; j < cols_a; j++) {\n// mat_c[j][i] = mat_a[i][j];\n// }\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": "*));\n for (i=0; i// for (i=0; i #pragma omp parallel for private(i,j,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": "] = (double)rand()/(double)(RAND_MAX);\n // C[i][j] = 0.0;\n // V[i][j] = 0.0;\n // }\n // }\n\n for (i=0; i #pragma omp parallel for private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": "(i=0; ifor (i=0; i #pragma omp parallel for private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": "(i=0; ifor (i=0; i #pragma omp parallel for private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": "llel for private(i,j,k)\n for (i=0; ifor (i=0; i #pragma omp parallel for private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = omp_get_wtime();\n\n\n if (validate) { \n printf(\"Checking result...\\n\"); fflush(stdout);\n // // #pragma omp parallel for reduction(+:ee) private(i,j)\n // for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/test_linear_algebra.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:ee) private(i,j)", "context_chars": 100, "text": "date) { \n printf(\"Checking result...\\n\"); fflush(stdout);\n // #pragma omp parallel for\n // // for (i=0; i #pragma omp parallel for reduction(+:ee) private(i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/mm_blocks/matrix_mul_validated.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k)", "context_chars": 100, "text": " double** mat_b, int cols_b, \n double** mat_c) {\n\n int i, j, k;\n for (i = 0; i < rows_a; i++) {\n for (j = 0; j < cols_b; j++) {\n mat_c[i][j] = 0;\n for (k = 0; k < cols_a; k++) {\n mat_c[i][j] += mat_a[i][k] * mat_b[j][k];\n }\n } \n } #pragma omp parallel for private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/shingles/fp_crunch/fp_utils.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "it_arrays(fa, fb, fc); \n\n // #pragma unroll(DATA_SIZE)\n for( long t = 0; t < n_trials; t++ ){\n for (int i = 0; i < DATA_SIZE; i++) {\n fa[i] += fc[i] * fb[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/kernels/shingles/fp_crunch/fp_utils.c", "omp_pragma_line": "#pragma omp parallel for shared(fa) reduction(+:norm)", "context_chars": 100, "text": "nt i = 0; i < DATA_SIZE; i++) {\n fa[i] += fc[i] * fb[i];\n }\n }\n\n // norm for validation\n for (int i = 0; i < DATA_SIZE; i++) {\n norm += fa[i] * fa[i];\n } #pragma omp parallel for shared(fa) reduction(+:norm)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,k,index)", "context_chars": 100, "text": "ATA) {\n x_inc=1;\n y_inc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\nfor (k=y_min-depth;k<=y_max+y_inc+depth;k++) {\n#pragma ivdep\n for (j=1;j<=depth;j++) {\n index=buffer_offset + j+(k+depth-1)*depth;\n left_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(x_min+x_inc-1+j,k,x_max+4+x_inc,x_min-2,y_min-2)];\n }\n } #pragma omp parallel for private(j,k,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,k,index)", "context_chars": 100, "text": "ATA) {\n x_inc=1;\n y_inc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\nfor (k=y_min-depth;k<=y_max+y_inc+depth;k++) {\n#pragma ivdep\n for (j=1;j<=depth;j++) {\n index=buffer_offset + j+(k+depth-1)*depth;\n field[FTNREF2D(x_min-j,k,x_max+4+x_inc,x_min-2,y_min-2)]=left_rcv_buffer[FTNREF1D(index,1)];\n }\n } #pragma omp parallel for private(j,k,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,k,index)", "context_chars": 100, "text": "ATA) {\n x_inc=1;\n y_inc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\nfor (k=y_min-depth;k<=y_max+y_inc+depth;k++) {\n#pragma ivdep\n for (j=1;j<=depth;j++) {\n index=buffer_offset + j+(k+depth-1)*depth;\n right_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(x_max+1-j,k,x_max+4+x_inc,x_min-2,y_min-2)];\n }\n } #pragma omp parallel for private(j,k,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,k,index)", "context_chars": 100, "text": "ATA) {\n x_inc=1;\n y_inc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\nfor (k=y_min-depth;k<=y_max+y_inc+depth;k++) {\n#pragma ivdep\n for (j=1;j<=depth;j++) {\n index=buffer_offset + j+(k+depth-1)*depth;\n field[FTNREF2D(x_max+x_inc+j,k,x_max+4+x_inc,x_min-2,y_min-2)]=right_rcv_buffer[FTNREF1D(index,1)];\n }\n } #pragma omp parallel for private(j,k,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,index)", "context_chars": 100, "text": "nc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\n for (k=1;k<=depth;k++) {\nfor (j=x_min-depth;j<=x_max+x_inc+depth;j++) {\n index= buffer_offset + k+(j+depth-1)*depth;\n top_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(j,y_max+1-k,x_max+4+x_inc,x_min-2,y_min-2)];\n } #pragma omp parallel for private(j,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,index)", "context_chars": 100, "text": "nc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\n for (k=1;k<=depth;k++) {\nfor (j=x_min-depth;j<=x_max+x_inc+depth;j++) {\n index= buffer_offset + k+(j+depth-1)*depth;\n bottom_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(j,y_min+y_inc-1+k,x_max+4+x_inc,x_min-2,y_min-2)];\n } #pragma omp parallel for 
private(j,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,index)", "context_chars": 100, "text": "nc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\n for (k=1;k<=depth;k++) {\nfor (j=x_min-depth;j<=x_max+x_inc+depth;j++) {\n index= buffer_offset + k+(j+depth-1)*depth;\n field[FTNREF2D(j,y_min-k,x_max+4+x_inc,x_min-2,y_min-2)]=bottom_rcv_buffer[FTNREF1D(index,1)];\n } #pragma omp parallel for private(j,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/CloverLeaf_OpenMP/pack_kernel_c.c", "omp_pragma_line": "#pragma omp parallel for private(j,index)", "context_chars": 100, "text": "nc=0;\n }\n if(field_type==Y_FACE_DATA) {\n x_inc=0;\n y_inc=1;\n }\n\n for (k=1;k<=depth;k++) {\nfor (j=x_min-depth;j<=x_max+x_inc+depth;j++) {\n index= buffer_offset + k+(j+depth-1)*depth;\n field[FTNREF2D(j,y_max+y_inc+k,x_max+4+x_inc,x_min-2,y_min-2)]=top_rcv_buffer[FTNREF1D(index,1)];\n } #pragma omp parallel for private(j,index)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/xsbench/openmp-threading/GridInit.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d * sizeof(int);\n\n\t\tdouble du = 1.0 / in.hash_bins;\n\n\t\t// For each energy level in the hash table\n\t\tfor( long e = 0; e < in.hash_bins; e++ )\n\t\t{\n\t\t\tdouble energy = e * du;\n\n\t\t\t// We need to determine the bounding energy levels for all isotopes\n\t\t\tfor( long i = 0; i < in.n_isotopes; i++ )\n\t\t\t{\n\t\t\t\tSD.index_grid[e * in.n_isotopes + i] = grid_search_nuclide( in.n_gridpoints, energy, SD.nuclide_grid + i * in.n_gridpoints, 0, in.n_gridpoints-1);\n\t\t\t}\n\t\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/xsbench/openmp-threading/Simulation.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 100)", "context_chars": 100, "text": "als and Energies\n\t////////////////////////////////////////////////////////////////////////////////\n\tfor( int i = 0; i < in.lookups; i++ )\n\t{\n\t\t// Set the initial seed value\n\t\tuint64_t seed = STARTING_SEED;\t\n\n\t\t// Forward seed to lookup index (we need 2 samples per lookup)\n\t\tseed = fast_forward_LCG(seed, 2*i);\n\n\t\t// Randomly pick an energy and material for the particle\n\t\tdouble p_energy = LCG_random_double(&seed);\n\t\tint mat = pick_mat(&seed); \n\n\t\tSD.p_energy_samples[i] = p_energy;\n\t\tSD.mat_samples[i] = mat;\n\t} #pragma omp parallel for schedule(dynamic, 100)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPCL/benchmarks/miniapps/xsbench/openmp-threading/Simulation.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 100)", "context_chars": 100, "text": "als and Energies\n\t////////////////////////////////////////////////////////////////////////////////\n\tfor( int i = 0; i < in.lookups; i++ )\n\t{\n\t\t// Set the initial seed value\n\t\tuint64_t seed = STARTING_SEED;\t\n\n\t\t// Forward seed to lookup index (we need 2 samples per lookup)\n\t\tseed = fast_forward_LCG(seed, 2*i);\n\n\t\t// Randomly pick an energy and material for the particle\n\t\tdouble p_energy = LCG_random_double(&seed);\n\t\tint mat = pick_mat(&seed); \n\n\t\tSD.p_energy_samples[i] = p_energy;\n\t\tSD.mat_samples[i] = mat;\n\t} #pragma omp parallel for 
schedule(dynamic, 100)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/flame/blis/kernels/bgq/1/bli_axpyv_bgq_int.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r4double xv, yv, zv;\n vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );\n\n for ( dim_t i = 0; i < n_run; i++ )\n\t{\n xv = vec_lda( 0 * sizeof(double), &x[i*4] );\n yv = vec_lda( 0 * sizeof(double), &y[i*4] );\n zv = vec_madd( alphav, xv, yv );\n vec_sta( zv, 0 * sizeof(double), &y[i*4] );\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/11_black_white_image/black_white_static.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(static)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/11_black_white_image/black_white.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue, tmp)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue, tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/11_black_white_image/black_white_dynamic.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(dynamic)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/11_black_white_image/black_white_guided.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(guided)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue, tmp) schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/1_pi_calculation/pi_calculation.c", "omp_pragma_line": "#pragma omp parallel for private(x) reduction(+:sum)", "context_chars": 100, "text": "ulate_pi() {\n double sum = 0.0;\n double step = 1.0 / intervals;\n double x;\n int i;\n\n for (i=1; i < intervals; i++) {\n x = step * (i+0.5); // We take 0.5 as we are taking middle point of rectangular area\n sum += 4.0 / (1.0 + x * x);\n } #pragma omp parallel for private(x) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/5_negative_image/negative_critical.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue) num_threads(num_threads)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue) num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/5_negative_image/negative.c", "omp_pragma_line": "#pragma omp parallel for private(y, color, red, green, blue) 
num_threads(num_threads)", "context_chars": 100, "text": "int width = gdImageSX(img);\n int height = gdImageSY(img);\n\n double t1 = omp_get_wtime();\n\n for(x=0; x #pragma omp parallel for private(y, color, red, green, blue) num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/7_points_clustering/points_clustering.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(num_threads)", "context_chars": 100, "text": " point\n cluster_count[i] = 1;\n points[i][1] = i;\n }\n}\n\nvoid cluster_points() {\n for (long i=0; i #pragma omp parallel for num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/2_matrix/matrix.c", "omp_pragma_line": "#pragma omp parallel for private(j, k)", "context_chars": 100, "text": " B[i][j] = rand() % 10;\n }\n }\n}\n\nvoid multiply_matrix() {\n\n int i, j, k;\n\n for (i=0; i < matrix_size; i++) {\n for (j=0; j < matrix_size; j++) {\n C[i][j] = 0;\n for (k=0; k < matrix_size; k++) {\n C[i][j] += A[i][k] * B[k][j];\n }\n }\n } #pragma omp parallel for private(j, k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/6_multitasking/multitask.cpp", "omp_pragma_line": "#pragma omp parallel for private(a) num_threads(num_threads)", "context_chars": 100, "text": "e answer!\n */\ndouble* sine_table(int num) {\n double* sines = new double[num];\n double a;\n\n for (int i=0; i #pragma omp parallel for private(a) num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nowke/hpc_lab/9_points_classification/points_classification.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(num_threads)", "context_chars": 100, "text": "int dx = x2-x1, dy = y2-y1;\n return (double)sqrt(dx*dx + dy*dy);\n}\n\nvoid classify_points() {\n for (long i=0; i #pragma omp parallel for num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhi4578/Parallelization-of-PSO/omp_parallel.c", "omp_pragma_line": "#pragma omp parallel for private(a,b) reduction(min:gBestFitness)", "context_chars": 100, "text": "int)nDimensions];\n double gBestFitness = DBL_MAX;\n int min;\n //particle initialization\n for (i=0; i #pragma omp parallel for private(a,b) reduction(min:gBestFitness)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhi4578/Parallelization-of-PSO/mpiomp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ain(int argc, char *argv[]) {\n int i,j;\n double nParticles;\n //Argument handling START\n for(i=1; i < argc-1; i++) {\n if (strcmp(argv[i], \"-D\") == 0)\n nDimensions = strtol(argv[i+1],NULL,10);\n else if (strcmp(argv[i], \"-m\") == 0)\n nParticles = strtol(argv[i+1],NULL,10);\n else if (strcmp(argv[i], \"-V\") == 0)\n mVelocity = strtol(argv[i+1],NULL,10);\n else if (strcmp(argv[i], \"-i\") == 0)\n nIterations = strtol(argv[i+1],NULL,10);\n else if (strcmp(argv[i], \"-s\") == 0)\n seed = strtol(argv[i+1],NULL,10);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhi4578/Parallelization-of-PSO/mpiomp.c", "omp_pragma_line": "#pragma omp parallel for private(a,b) reduction(min:gBestFitness)", "context_chars": 100, "text": "int)nDimensions];\n double gBestFitness = DBL_MAX;\n int min;\n //particle initialization\n for (i=0; i #pragma omp parallel for private(a,b) reduction(min:gBestFitness)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhi4578/Parallelization-of-PSO/mpiomp.c", "omp_pragma_line": "#pragma omp parallel for private(a,b)", "context_chars": 100, "text": " private(a,b) reduction(min:gBestFitness)\n for (i=0; ifor (j=0; j<(int)nDimensions; j++) {\n a = x_min + (x_max - x_min) * gsl_rng_uniform(r);\n b = x_min + (x_max - x_min) * gsl_rng_uniform(r);\n positions[i][j] = a;\n pBestPositions[i][j] = a;\n velocities[i][j] = (a-b) / 2.;\n } #pragma omp parallel for private(a,b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhi4578/Parallelization-of-PSO/omp.c", "omp_pragma_line": "#pragma omp parallel for private(a,b) reduction(min:gBestFitness)", "context_chars": 100, "text": "int)nDimensions];\n double gBestFitness = DBL_MAX;\n int min;\n //particle initialization\n for (i=0; i #pragma omp parallel for private(a,b) reduction(min:gBestFitness)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/control/problem.c", "omp_pragma_line": "#pragma omp parallel for private(dest, src, i, j)", "context_chars": 100, "text": " esize *= A->shape[i];\n if(A->order == 'C')\n {\n lda = A->shape[A->ndim-1];\n //for(i = 0; i < nrows; i++)\n for(j = 0; j < ncols; j++)\n {\n dest = j*(size_t)ld+i;\n src = irow[i]*lda+icol[j];\n memcpy(result+dest*esize, A->data+src*esize, esize);\n } #pragma omp parallel for private(dest, src, i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/control/problem.c", "omp_pragma_line": "#pragma omp parallel for private(dest, src, i, j)", "context_chars": 100, "text": " A->data+src*esize, esize);\n }\n }\n else\n {\n lda = A->shape[0];\n //for(i = 0; i < nrows; i++)\n for(j = 0; j < ncols; j++)\n {\n dest = j*(size_t)ld+i;\n src = icol[j]*lda+irow[i];\n memcpy(result+dest*esize, A->data+src*esize, esize);\n } #pragma omp parallel for private(dest, src, i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "s_far;\n char symm = F->symm;\n int info = 0;\n // Simple cycle over all far-field blocks\n for(bi = 0; bi < nblocks_far; bi++)\n {\n if(info != 0)\n continue;\n // Get indexes and sizes of block row and column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Rank of a block\n int rank = M->far_rank[bi];\n // Temporary array for more precise dnrm2\n double *D, D_norm[ncols];\n size_t D_size = (size_t)nrows*(size_t)ncols;\n STARSH_PMALLOC(D, D_size, info);\n // Get actual elements of a block\n kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],\n RD, CD, D, nrows);\n // Get Frobenius norm of a block\n for(size_t k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);\n double tmpnorm = cblas_dnrm2(ncols, D_norm, 1);\n far_block_norm[bi] = tmpnorm;\n // Get difference of initial and approximated block\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, nrows, ncols,\n rank, -1., U[bi]->data, nrows, V[bi]->data, ncols, 1.,\n D, nrows);\n // Compute Frobenius norm of the latter\n for(size_t k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);\n free(D);\n double tmpdiff = cblas_dnrm2(ncols, D_norm, 1);\n far_block_diff[bi] = tmpdiff;\n if(i != j && symm == 'S')\n {\n // Multiply by square root of 2 in symmetric case\n // (work on 1 
block instead of 2 blocks)\n far_block_norm[bi] *= sqrt2;\n far_block_diff[bi] *= sqrt2;\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "// not Error code)\n if(M->onfly == 0)\n // Simple cycle over all near-field blocks\n for(bi = 0; bi < nblocks_near; bi++)\n {\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Compute norm of a block\n double *D = M->near_D[bi]->data, D_norm[ncols];\n for(size_t k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);\n near_block_norm[bi] = cblas_dnrm2(ncols, D_norm, 1);\n if(i != j && symm == 'S')\n // Multiply by square root of 2 in symmetric case\n near_block_norm[bi] *= sqrt2;\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "ck_norm[bi] *= sqrt2;\n }\n else\n // Simple cycle over all near-field blocks\n for(bi = 0; bi < nblocks_near; bi++)\n {\n if(info != 0)\n continue;\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n double *D, D_norm[ncols];\n // Allocate temporary array and fill it with elements of a block\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],\n RD, CD, D, nrows);\n // Compute norm of a block\n for(size_t k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);\n // Free temporary buffer\n free(D);\n near_block_norm[bi] = cblas_dnrm2(ncols, D_norm, 1);\n if(i != j && symm == 'S')\n // Multiply by square root of 2 ub symmetric case\n near_block_norm[bi] *= sqrt2;\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "axrank = 100;\n int maxnb = nrows/F->nbrows;\n // Setting B = beta*B\n if(beta == 0.)\n for(int i = 0; i < nrows; i++)\n for(int j = 0; j < nrhs; j++)\n B[j*ldb+i] = 0.;\n else\n #pragma omp parallel for schedule(static)\n for(int i = 0; i < nrows; i++)\n for(int j = 0; j < nrhs; j++)\n B[j*ldb+i] *= beta;\n double *temp_D, *temp_B;\n int num_threads;\n #pragma omp parallel\n #pragma omp master\n num_threads = omp_get_num_threads();\n if(M->onfly == 0)\n {\n STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "s; i++)\n for(int j = 0; j < nrhs; j++)\n B[j*ldb+i] = 0.;\n else\n for(int i = 0; i < nrows; i++)\n for(int j = 0; j < nrhs; j++)\n B[j*ldb+i] *= beta;\n double *temp_D, *temp_B;\n int num_threads;\n #pragma omp parallel\n #pragma omp master\n num_threads = omp_get_num_threads();\n if(M->onfly == 0)\n {\n 
STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "[j] = 0.;\n }\n int ldout = nrows;\n // Simple cycle over all far-field admissible blocks\n for(bi = 0; bi < nblocks_far; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[bi];\n // Get pointers to data buffers\n double *U = M->far_U[bi]->data, *V = M->far_V[bi]->data;\n int info = 0;\n double *D = temp_D+omp_get_thread_num()*nrhs*maxrank;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Multiply low-rank matrix in V*U^T format by a dense matrix\n // U and V are simply swapped in case of symmetric block\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols,\n nrhs, rank, alpha, V, ncols, D, rank, 1.0,\n out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": ");\n }\n }\n if(M->onfly == 1)\n // Simple cycle over all near-field blocks\n for(bi = 0; bi < nblocks_near; bi++)\n {\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n int info = 0;\n double *D = temp_D+omp_get_thread_num()*maxnb*maxnb;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n // Fill temporary buffer with elements of corresponding block\n kernel(nrows, ncols, R->pivot+R->start[i],\n C->pivot+C->start[j], RD, CD, D, nrows);\n // Multiply 2 dense matrices\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Repeat in case of symmetric matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,\n nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,\n 1.0, out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "ldout);\n }\n }\n else\n // Simple cycle over all near-field blocks\n for(bi = 0; bi < nblocks_near; bi++)\n {\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Get pointers to data buffers\n double *D = 
M->near_D[bi]->data;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n // Multiply 2 dense matrices\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Repeat in case of symmetric matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,\n nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,\n 1.0, out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(bi = 0; bi < nblocks_far; bi++)\n {\n int info;\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int mn = nrows > ncols ? ncols : nrows;\n // Get size of temporary arrays\n int lmn = mn, lwork = (4*lmn+8+nrows+ncols)*lmn, liwork = 8*lmn;\n double *D, *work;\n int *iwork;\n size_t D_size = (size_t)nrows*(size_t)ncols;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, D_size, info);\n STARSH_PMALLOC(work, lwork, info);\n STARSH_PMALLOC(iwork, liwork, info);\n // Compute elements of a block\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows,\n far_V[bi]->data, ncols, far_rank+bi, maxrank, tol, work, lwork,\n iwork);\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ar, 2*new_nblocks_near);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(bi = 0; bi < new_nblocks_near; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = 
block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D);\n offset_D += near_D[bi]->size;\n }\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(bi = 0; bi < nblocks_far; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n if(nrows != ncols && BAD_TILE == 0)\n {\n #pragma omp critical\n BAD_TILE = 1;\n STARSH_WARNING(\"This was only tested on square tiles, error of \"\n \"approximation may be much higher, than demanded\");\n }\n int mn = nrows < ncols ? nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n int lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);\n int liwork = 8*mn2;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n STARSH_PMALLOC(iwork, liwork, info);\n STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n double time0 = omp_get_wtime();\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n double time1 = omp_get_wtime();\n starsh_dense_dlrrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows,\n far_V[bi]->data, ncols, far_rank+bi, maxrank, oversample, tol,\n work, lwork, iwork);\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ar, 2*new_nblocks_near);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = 
F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n //for(bi = 0; bi < new_nblocks_near; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D);\n offset_D += near_D[bi]->size;\n }\n double time0 = omp_get_wtime();\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n double time1 = omp_get_wtime();\n #pragma omp critical\n kernel_time += time1-time0;\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(bi = 0; bi < nblocks_far; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int mn = nrows < ncols ? nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n int lwork = 3*ncols+1, lwork_sdd = (4*(size_t)mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+mn2+1)+mn;\n int liwork = ncols, liwork_sdd = 8*mn2;\n if(liwork_sdd > liwork)\n liwork = liwork_sdd;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n STARSH_PMALLOC(iwork, liwork, info);\n STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n starsh_dense_dlrqp3(nrows, ncols, D, nrows, far_U[bi]->data, nrows,\n far_V[bi]->data, ncols, far_rank+bi, maxrank, oversample, tol,\n work, lwork, iwork);\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ar, 2*new_nblocks_near);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for 
schedule(static)", "context_chars": 100, "text": "bi++)\n block_near[bi] = F->block_near[bi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/openmp/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(bi = 0; bi < new_nblocks_near; bi++)\n {\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D);\n offset_D += near_D[bi]->size;\n }\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "far_local;\n char symm = F->symm;\n int info;\n // Simple cycle over all far-field blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes and sizes of block row and column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Rank of a block\n int rank = M->far_rank[lbi];\n // Temporary array for more precise dnrm2\n double *D, D_norm[ncols];\n size_t D_size = (size_t)nrows*(size_t)ncols;\n STARSH_PMALLOC(D, D_size, info);\n // Get actual elements of a block\n kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],\n RD, CD, D, nrows);\n // Get Frobenius norm of a block\n for(STARSH_int k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*(size_t)nrows, 1);\n double tmpnorm = cblas_dnrm2(ncols, D_norm, 1);\n far_block_norm[lbi] = tmpnorm;\n // Get difference of initial and approximated block\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, nrows, ncols,\n rank, -1., U[lbi]->data, nrows, V[lbi]->data, ncols, 1.,\n D, nrows);\n // Compute Frobenius norm of the latter\n for(STARSH_int k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*(size_t)nrows, 1);\n free(D);\n double tmpdiff = cblas_dnrm2(ncols, D_norm, 1);\n far_block_diff[lbi] = tmpdiff;\n if(i != j && symm == 'S')\n {\n // Multiply by square root of 2 in symmetric case\n // (work on 1 block instead of 2 blocks)\n far_block_norm[lbi] *= sqrt2;\n far_block_diff[lbi] *= sqrt2;\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "2;\n }\n }\n if(M->onfly == 0)\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = 
F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Compute norm of a block\n double *D = M->near_D[lbi]->data, D_norm[ncols];\n for(STARSH_int k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*(size_t)nrows, 1);\n near_block_norm[lbi] = cblas_dnrm2(ncols, D_norm, 1);\n if(i != j && symm == 'S')\n // Multiply by square root of 2 in symmetric case\n near_block_norm[lbi] *= sqrt2;\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dfe.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "k_norm[lbi] *= sqrt2;\n }\n else\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n double *D, D_norm[ncols];\n // Allocate temporary array and fill it with elements of a block\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],\n RD, CD, D, nrows);\n // Compute norm of a block\n for(STARSH_int k = 0; k < ncols; k++)\n D_norm[k] = cblas_dnrm2(nrows, D+k*(size_t)nrows, 1);\n // Free temporary buffer\n free(D);\n near_block_norm[lbi] = cblas_dnrm2(ncols, D_norm, 1);\n if(i != j && symm == 'S')\n // Multiply by square root of 2 ub symmetric case\n near_block_norm[lbi] *= sqrt2;\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "nrhs*(size_t)nrows; j++)\n out[j] = 0.;\n }\n if(beta != 0. 
&& mpi_rank == 0)\n for(STARSH_int i = 0; i < nrows; i++)\n for(STARSH_int j = 0; j < nrhs; j++)\n temp_B[j*ldb+i] = beta*B[j*ldb+i];\n int ldout = nrows;\n // Simple cycle over all far-field admissible blocks\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[lbi];\n if(rank == 0)\n continue;\n // Get pointers to data buffers\n double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*nrhs*maxrank;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Multiply low-rank matrix in V*U^T format by a dense matrix\n // U and V are simply swapped in case of symmetric block\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols,\n nrhs, rank, alpha, V, ncols, D, rank, 1.0,\n out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "eta*B[j*ldb+i];\n int ldout = nrows;\n // Simple cycle over all far-field admissible blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[lbi];\n if(rank == 0)\n continue;\n // Get pointers to data buffers\n double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*nrhs*maxrank;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Multiply low-rank matrix in V*U^T format by a dense matrix\n // U and V are simply swapped in case of symmetric block\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols,\n nrhs, rank, alpha, V, ncols, D, rank, 1.0,\n out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": ");\n }\n }\n if(M->onfly == 1)\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*maxnb*maxnb;\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Fill temporary buffer with elements of corresponding block\n kernel(nrows, ncols, R->pivot+R->start[i],\n C->pivot+C->start[j], RD, CD, D, nrows);\n // Multiply 2 dense matrices\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Repeat in case of symmetric matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,\n nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,\n 1.0, out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "ldout);\n }\n }\n else\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Get pointers to data buffers\n double *D = M->near_D[lbi]->data;\n#ifdef OPENMP\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n#else\n double *out = temp_B;\n\n // Multiply 2 dense matrices\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n out+R->start[i], ldout);\n if(i != j && symm == 'S')\n {\n // Repeat in case of symmetric matrix\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,\n nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,\n 1.0, out+C->start[j], ldout);\n }\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": ";\n }\n }\n // Reduce result to temp_B, corresponding to master openmp thread\n for(int i = 0; i < ldout; i++)\n for(int j = 0; j < nrhs; j++)\n for(int k = 1; k < num_threads; k++)\n temp_B[j*(size_t)ldout+i] +=\n temp_B[(k*(size_t)nrhs+j)*ldout+i];\n // Since I keep result only on root node, following code is commented\n //for(int i = 0; i < nrhs; i++)\n // MPI_Allreduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM,\n // MPI_COMM_WORLD);\n for(int i = 0; i < nrhs; i++)\n MPI_Reduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM, 0,\n MPI_COMM_WORLD);\n free(temp_B);\n free(temp_D);\n return STARSH_SUCCESS;\n}\n\n\n\nint starsh_blrm__dmml_mpi_tlr(STARSH_blrm *matrix, int nrhs, double alpha,\n double *A, int lda, double beta, double *B, int 
ldb)\n//! Multiply blr-matrix by dense matrix on MPI nodes.\n/*! Performs `C=alpha*A*B+beta*C` with @ref STARSH_blrm `A` and dense matrices\n * `B` and `C`. All the integer types are int, since they are used in BLAS\n * calls. Block-wise low-rank matrix `A` is in TLR format.\n *\n * @param[in] matrix: Pointer to @ref STARSH_blrm object.\n * @param[in] nrhs: Number of right hand sides.\n * @param[in] alpha: Scalar mutliplier.\n * @param[in] A: Dense matrix, right havd side.\n * @param[in] lda: Leading dimension of `A`.\n * @param[in] beta: Scalar multiplier.\n * @param[in] B: Resulting dense matrix.\n * @param[in] ldb: Leading dimension of B.\n * @return Error code @ref STARSH_ERRNO.\n * @ingroup blrm\n * */\n{\n STARSH_blrm *M = matrix;\n STARSH_blrf *F = M->format;\n STARSH_problem *P = F->problem;\n STARSH_kernel *kernel = P->kernel;\n STARSH_int nrows = P->shape[0];\n STARSH_int ncols = P->shape[P->ndim-1];\n // Shorcuts to information about clusters\n STARSH_cluster *R = F->row_cluster;\n STARSH_cluster *C = F->col_cluster;\n void *RD = R->data, *CD = C->data;\n // Number of far-field and near-field blocks\n STARSH_int nblocks_far_local = F->nblocks_far_local;\n STARSH_int nblocks_near_local = F->nblocks_near_local;\n STARSH_int lbi;\n char symm = F->symm;\n int maxrank = 0;\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n if(maxrank < M->far_rank[lbi])\n maxrank = M->far_rank[lbi];\n STARSH_int maxnb = nrows/F->nbrows;\n int mpi_rank, mpi_size;\n MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);\n MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);\n int grid_nx = sqrt(mpi_size), grid_ny = grid_nx, grid_x, grid_y;\n if(grid_nx*grid_ny != mpi_size)\n STARSH_ERROR(\"MPI SIZE MUST BE SQUARE OF INTEGER!\");\n grid_ny = mpi_size / grid_nx;\n grid_x = mpi_rank / grid_nx;\n grid_y = mpi_rank % grid_nx;\n MPI_Group mpi_leadingx_group, mpi_leadingy_group, mpi_world_group;\n MPI_Comm mpi_splitx, mpi_splity, mpi_leadingx, mpi_leadingy;\n MPI_Comm_group(MPI_COMM_WORLD, &mpi_world_group);\n int group_rank[grid_nx];\n for(int i = 0; i < grid_ny; i++)\n group_rank[i] = i;\n MPI_Group_incl(mpi_world_group, grid_ny, group_rank, &mpi_leadingy_group);\n MPI_Comm_create_group(MPI_COMM_WORLD, mpi_leadingy_group, 0,\n &mpi_leadingy);\n for(int i = 0; i < grid_nx; i++)\n group_rank[i] = i*grid_ny;\n MPI_Group_incl(mpi_world_group, grid_nx, group_rank, &mpi_leadingx_group);\n MPI_Comm_create_group(MPI_COMM_WORLD, mpi_leadingx_group, 0,\n &mpi_leadingx);\n MPI_Comm_split(MPI_COMM_WORLD, grid_x, mpi_rank, &mpi_splitx);\n MPI_Comm_split(MPI_COMM_WORLD, grid_y, mpi_rank, &mpi_splity);\n int mpi_leadingx_rank=-1, mpi_leadingx_size=-1;\n int mpi_leadingy_rank=-1, mpi_leadingy_size=-1;\n int mpi_splitx_rank, mpi_splitx_size;\n int mpi_splity_rank, mpi_splity_size;\n if(mpi_leadingx != MPI_COMM_NULL)\n {\n MPI_Comm_rank(mpi_leadingx, &mpi_leadingx_rank);\n MPI_Comm_size(mpi_leadingx, &mpi_leadingx_size);\n }\n if(mpi_leadingy != MPI_COMM_NULL)\n {\n MPI_Comm_rank(mpi_leadingy, &mpi_leadingy_rank);\n MPI_Comm_size(mpi_leadingy, &mpi_leadingy_size);\n }\n MPI_Comm_rank(mpi_splitx, &mpi_splitx_rank);\n MPI_Comm_size(mpi_splitx, &mpi_splitx_size);\n MPI_Comm_rank(mpi_splity, &mpi_splity_rank);\n MPI_Comm_size(mpi_splity, &mpi_splity_size);\n /*\n STARSH_WARNING(\"MPI: GLOBAL=%d/%d LEADX=%d/%d LEADY=%d/%d SPLITX=%d/%d \"\n \"SPLITY=%d/%d\", mpi_rank, mpi_size, mpi_leadingx_rank,\n mpi_leadingx_size, mpi_leadingy_rank, mpi_leadingy_size,\n mpi_splitx_rank, mpi_splitx_size, mpi_splity_rank,\n mpi_splity_size);\n */\n int grid_block_size = 
maxnb*grid_nx;\n int ld_temp_A = (F->nbcols+grid_nx-1-grid_x)/grid_nx*maxnb;\n double *temp_A;\n STARSH_MALLOC(temp_A, nrhs*(size_t)ld_temp_A);\n if(mpi_leadingx != MPI_COMM_NULL)\n {\n for(STARSH_int i = 0; i < F->nbcols/grid_nx; i++)\n {\n double *src = A+i*grid_block_size;\n double *recv = temp_A+i*maxnb;\n for(int j = 0; j < nrhs; j++)\n {\n MPI_Scatter(src+j*(size_t)lda, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ld_temp_A, maxnb, MPI_DOUBLE, 0,\n mpi_leadingx);\n }\n }\n STARSH_int i = F->nbcols/grid_nx;\n int remain = F->nbcols-i*grid_nx;\n if(remain > 0)\n {\n double *src = A+i*(size_t)grid_block_size;\n double *recv = temp_A+i*(size_t)maxnb;\n if(mpi_rank == 0)\n {\n int sendcounts[grid_nx], displs[grid_nx];\n for(int j = 0; j < remain; j++)\n sendcounts[j] = maxnb;\n for(int j = remain; j < grid_nx; j++)\n sendcounts[j] = 0;\n displs[0] = 0;\n for(int j = 1; j < grid_nx; j++)\n displs[j] = displs[j-1]+sendcounts[j-1];\n for(int j = 0; j < nrhs; j++)\n MPI_Scatterv(src+j*(size_t)lda, sendcounts, displs,\n MPI_DOUBLE, recv+j*(size_t)ld_temp_A, maxnb,\n MPI_DOUBLE, 0, mpi_leadingx);\n }\n else\n {\n int recvcount = 0;\n if(grid_x < remain)\n recvcount = maxnb;\n for(int j = 0; j < nrhs; j++)\n MPI_Scatterv(NULL, NULL, NULL, MPI_DOUBLE,\n recv+j*(size_t)ld_temp_A, recvcount, MPI_DOUBLE, 0,\n mpi_leadingx);\n }\n }\n }\n MPI_Bcast(temp_A, nrhs*(size_t)ld_temp_A, MPI_DOUBLE, 0, mpi_splitx);\n //if(mpi_rank == 0)\n // STARSH_WARNING(\"DATA DISTRIBUTED!\");\n //for(int i = 0; i < nrhs; i++)\n // MPI_Bcast(A+i*lda, ncols, MPI_DOUBLE, 0, MPI_COMM_WORLD);\n double *temp_D, *temp_B;\n int num_threads;\n#ifdef OPENMP\n #pragma omp parallel\n #pragma omp master\n num_threads = omp_get_num_threads();\n#else\n num_threads = 1;\n\n if(M->onfly == 0)\n {\n STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);\n }\n else\n {\n STARSH_MALLOC(temp_D, num_threads*maxnb*maxnb);\n }\n int ldout = (F->nbrows+grid_ny-1-grid_y)/grid_ny*maxnb;\n //STARSH_WARNING(\"MPI=%d ldA=%d ldB=%d\", mpi_rank, ld_temp_A, ldout);\n STARSH_MALLOC(temp_B, num_threads*(size_t)nrhs*(size_t)ldout);\n // Setting temp_B=beta*B for master thread of root node and B=0 otherwise\n #pragma omp parallel\n {\n#ifdef OPENMP\n double *out = temp_B+omp_get_thread_num()*nrhs*ldout;\n#else\n double *out = temp_B;\n\n for(size_t j = 0; j < nrhs*(size_t)ldout; j++)\n out[j] = 0.;\n }\n if(beta != 0. 
&& mpi_leadingy != MPI_COMM_NULL)\n {\n for(STARSH_int i = 0; i < F->nbrows/grid_ny; i++)\n {\n double *src = B+i*maxnb*grid_ny;\n double *recv = temp_B+i*maxnb;\n for(int j = 0; j < nrhs; j++)\n MPI_Scatter(src+j*(size_t)ldb, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ldout, maxnb, MPI_DOUBLE, 0,\n mpi_leadingy);\n }\n #pragma omp parallel for schedule(static)\n for(int i = 0; i < ldout; i++)\n for(int j = 0; j < nrhs; j++)\n temp_B[j*(size_t)ldb+i] *= beta;\n }\n //if(mpi_rank == 0)\n // STARSH_WARNING(\"MORE DATA DISTRIBUTED\");\n // Simple cycle over all far-field admissible blocks\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[lbi];\n if(rank == 0)\n continue;\n // Get pointers to data buffers\n double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*(size_t)nrhs*(size_t)maxrank;\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*(size_t)ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n //cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n // ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, temp_A+(j/grid_nx)*maxnb, ld_temp_A, 0.0,\n D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+i/grid_ny*maxnb,\n ldout);\n }\n //STARSH_WARNING(\"NODE %d DONE WITH FAR\", mpi_rank);\n if(M->onfly == 1)\n // Simple cycle over all near-field blocks\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*(size_t)maxnb*\n (size_t)maxnb;\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*\n (size_t)ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Fill temporary buffer with elements of corresponding block\n kernel(nrows, ncols, R->pivot+R->start[i],\n C->pivot+C->start[j], RD, CD, D, nrows);\n // Multiply 2 dense matrices\n //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n // nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n // out+R->start[i], ldout);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows,\n temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,\n out+i/grid_ny*(size_t)maxnb, ldout);\n }\n else\n // Simple cycle over all near-field blocks\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Get pointers to data buffers\n double *D = M->near_D[lbi]->data;\n#ifdef OPENMP\n double *out = 
temp_B+omp_get_thread_num()*(size_t)nrhs*\n (size_t)ldout;\n#else\n double *out = temp_B;\n\n // Multiply 2 dense matrices\n //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n // nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n // out+R->start[i], ldout);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows,\n temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,\n out+i/grid_ny*(size_t)maxnb, ldout);\n }\n // Reduce result to temp_B, corresponding to master openmp thread\n #pragma omp parallel for schedule(static)\n for(int i = 0; i < ldout; i++)\n for(int j = 0; j < nrhs; j++)\n for(int k = 1; k < num_threads; k++)\n temp_B[j*(size_t)ldout+i] +=\n temp_B[(k*(size_t)nrhs+j)*ldout+i];\n //STARSH_WARNING(\"NODE %d DONE WITH OMP REDUCTION\", mpi_rank);\n MPI_Barrier(MPI_COMM_WORLD);\n // Since I keep result only on root node, following code is commented\n //for(int i = 0; i < nrhs; i++)\n // MPI_Allreduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM,\n // MPI_COMM_WORLD);\n //for(int i = 0; i < nrhs; i++)\n // MPI_Reduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM, 0,\n // MPI_COMM_WORLD);\n double *final_B = NULL;\n if(mpi_leadingy != MPI_COMM_NULL)\n {\n STARSH_MALLOC(final_B, nrhs*(size_t)ldout);\n #pragma omp parallel for schedule(static)\n for(size_t i = 0; i < nrhs*(size_t)ldout; i++)\n final_B[i] = 0.0;\n }\n MPI_Reduce(temp_B, final_B, nrhs*(size_t)ldout, MPI_DOUBLE, MPI_SUM, 0,\n mpi_splity);\n //STARSH_WARNING(\"REDUCE(%d): %f\", mpi_rank, temp_B[0]);\n //if(mpi_splity_rank == 0)\n // STARSH_WARNING(\"RESULT(%d): %f\", mpi_rank, final_B[0]);\n if(mpi_leadingy != MPI_COMM_NULL)\n {\n for(STARSH_int i = 0; i < F->nbrows/grid_ny; i++)\n {\n double *src = final_B+i*(size_t)maxnb;\n double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;\n for(int j = 0; j < nrhs; j++)\n MPI_Gather(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ldb, maxnb, MPI_DOUBLE, 0,\n mpi_leadingy);\n }\n STARSH_int i = F->nbrows/grid_ny;\n int remain = F->nbrows-i*grid_ny;\n if(remain > 0)\n {\n double *src = final_B+i*(size_t)maxnb;\n double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;\n if(mpi_rank == 0)\n {\n int recvcounts[grid_ny], displs[grid_ny];\n for(int j = 0; j < remain; j++)\n recvcounts[j] = maxnb;\n for(int j = remain; j < grid_ny; j++)\n recvcounts[j] = 0;\n displs[0] = 0;\n for(int j = 1; j < grid_ny; j++)\n displs[j] = displs[j-1]+recvcounts[j-1];\n for(int j = 0; j < nrhs; j++)\n MPI_Gatherv(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ldb, recvcounts, displs, MPI_DOUBLE,\n 0, mpi_leadingy);\n }\n else\n {\n int sendcount = 0;\n if(grid_y < remain)\n sendcount = maxnb;\n for(int j = 0; j < nrhs; j++)\n MPI_Gatherv(src+j*(size_t)ldout, sendcount, MPI_DOUBLE,\n NULL, NULL, NULL, MPI_DOUBLE, 0, mpi_leadingy);\n }\n }\n MPI_Comm_free(&mpi_leadingy);\n free(final_B);\n }\n if(mpi_leadingx != MPI_COMM_NULL)\n MPI_Comm_free(&mpi_leadingx);\n MPI_Comm_free(&mpi_splitx);\n MPI_Comm_free(&mpi_splity);\n free(temp_A);\n free(temp_B);\n free(temp_D);\n return STARSH_SUCCESS;\n} #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "ecv+j*(size_t)ldout, maxnb, MPI_DOUBLE, 0,\n mpi_leadingy);\n }\n for(int i = 0; i < ldout; i++)\n for(int j = 0; j < nrhs; j++)\n temp_B[j*(size_t)ldb+i] *= beta;\n }\n //if(mpi_rank == 0)\n // 
STARSH_WARNING(\"MORE DATA DISTRIBUTED\");\n // Simple cycle over all far-field admissible blocks\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[lbi];\n if(rank == 0)\n continue;\n // Get pointers to data buffers\n double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*(size_t)nrhs*(size_t)maxrank;\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*(size_t)ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n //cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n // ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, temp_A+(j/grid_nx)*maxnb, ld_temp_A, 0.0,\n D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+i/grid_ny*maxnb,\n ldout);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "ARSH_WARNING(\"MORE DATA DISTRIBUTED\");\n // Simple cycle over all far-field admissible blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = F->block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = F->block_far[2*bi];\n STARSH_int j = F->block_far[2*bi+1];\n // Get sizes and rank\n int nrows = R->size[i];\n int ncols = C->size[j];\n int rank = M->far_rank[lbi];\n if(rank == 0)\n continue;\n // Get pointers to data buffers\n double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;\n int info = 0;\n#ifdef OPENMP\n double *D = temp_D+omp_get_thread_num()*(size_t)nrhs*(size_t)maxrank;\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*(size_t)ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Multiply low-rank matrix in U*V^T format by a dense matrix\n //cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n // ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);\n cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,\n ncols, 1.0, V, ncols, temp_A+(j/grid_nx)*maxnb, ld_temp_A, 0.0,\n D, rank);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,\n rank, alpha, U, nrows, D, rank, 1.0, out+i/grid_ny*maxnb,\n ldout);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "H FAR\", mpi_rank);\n if(M->onfly == 1)\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n int info = 0;\n#ifdef OPENMP\n double *D = 
temp_D+omp_get_thread_num()*(size_t)maxnb*\n (size_t)maxnb;\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*\n (size_t)ldout;\n#else\n double *D = temp_D;\n double *out = temp_B;\n\n // Fill temporary buffer with elements of corresponding block\n kernel(nrows, ncols, R->pivot+R->start[i],\n C->pivot+C->start[j], RD, CD, D, nrows);\n // Multiply 2 dense matrices\n //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n // nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n // out+R->start[i], ldout);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows,\n temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,\n out+i/grid_ny*(size_t)maxnb, ldout);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "size_t)maxnb, ldout);\n }\n else\n // Simple cycle over all near-field blocks\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n {\n STARSH_int bi = F->block_near_local[lbi];\n // Get indexes and sizes of corresponding block row and column\n STARSH_int i = F->block_near[2*bi];\n STARSH_int j = F->block_near[2*bi+1];\n int nrows = R->size[i];\n int ncols = C->size[j];\n // Get pointers to data buffers\n double *D = M->near_D[lbi]->data;\n#ifdef OPENMP\n double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*\n (size_t)ldout;\n#else\n double *out = temp_B;\n\n // Multiply 2 dense matrices\n //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n // nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,\n // out+R->start[i], ldout);\n cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,\n nrhs, ncols, alpha, D, nrows,\n temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,\n out+i/grid_ny*(size_t)maxnb, ldout);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": ")maxnb, ldout);\n }\n // Reduce result to temp_B, corresponding to master openmp thread\n for(int i = 0; i < ldout; i++)\n for(int j = 0; j < nrhs; j++)\n for(int k = 1; k < num_threads; k++)\n temp_B[j*(size_t)ldout+i] +=\n temp_B[(k*(size_t)nrhs+j)*ldout+i];\n //STARSH_WARNING(\"NODE %d DONE WITH OMP REDUCTION\", mpi_rank);\n MPI_Barrier(MPI_COMM_WORLD);\n // Since I keep result only on root node, following code is commented\n //for(int i = 0; i < nrhs; i++)\n // MPI_Allreduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM,\n // MPI_COMM_WORLD);\n //for(int i = 0; i < nrhs; i++)\n // MPI_Reduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM, 0,\n // MPI_COMM_WORLD);\n double *final_B = NULL;\n if(mpi_leadingy != MPI_COMM_NULL)\n {\n STARSH_MALLOC(final_B, nrhs*(size_t)ldout);\n #pragma omp parallel for schedule(static)\n for(size_t i = 0; i < nrhs*(size_t)ldout; i++)\n final_B[i] = 0.0;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dmml.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "if(mpi_leadingy != MPI_COMM_NULL)\n {\n STARSH_MALLOC(final_B, nrhs*(size_t)ldout);\n for(size_t i = 0; i < nrhs*(size_t)ldout; i++)\n final_B[i] = 0.0;\n }\n MPI_Reduce(temp_B, final_B, 
nrhs*(size_t)ldout, MPI_DOUBLE, MPI_SUM, 0,\n mpi_splity);\n //STARSH_WARNING(\"REDUCE(%d): %f\", mpi_rank, temp_B[0]);\n //if(mpi_splity_rank == 0)\n // STARSH_WARNING(\"RESULT(%d): %f\", mpi_rank, final_B[0]);\n if(mpi_leadingy != MPI_COMM_NULL)\n {\n for(STARSH_int i = 0; i < F->nbrows/grid_ny; i++)\n {\n double *src = final_B+i*(size_t)maxnb;\n double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;\n for(int j = 0; j < nrhs; j++)\n MPI_Gather(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ldb, maxnb, MPI_DOUBLE, 0,\n mpi_leadingy);\n }\n STARSH_int i = F->nbrows/grid_ny;\n int remain = F->nbrows-i*grid_ny;\n if(remain > 0)\n {\n double *src = final_B+i*(size_t)maxnb;\n double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;\n if(mpi_rank == 0)\n {\n int recvcounts[grid_ny], displs[grid_ny];\n for(int j = 0; j < remain; j++)\n recvcounts[j] = maxnb;\n for(int j = remain; j < grid_ny; j++)\n recvcounts[j] = 0;\n displs[0] = 0;\n for(int j = 1; j < grid_ny; j++)\n displs[j] = displs[j-1]+recvcounts[j-1];\n for(int j = 0; j < nrhs; j++)\n MPI_Gatherv(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,\n recv+j*(size_t)ldb, recvcounts, displs, MPI_DOUBLE,\n 0, mpi_leadingy);\n }\n else\n {\n int sendcount = 0;\n if(grid_y < remain)\n sendcount = maxnb;\n for(int j = 0; j < nrhs; j++)\n MPI_Gatherv(src+j*(size_t)ldout, sendcount, MPI_DOUBLE,\n NULL, NULL, NULL, MPI_DOUBLE, 0, mpi_leadingy);\n }\n }\n MPI_Comm_free(&mpi_leadingy);\n free(final_B);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "r-field admissible blocks\n // Since this is fake low-rank approximation, every tile is dense\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n far_rank[lbi] = -1;\n /*\n #pragma omp parallel for schedule(dynamic, 1)\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n size_t bi = block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n int i = block_far[2*bi];\n int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n if(nrows != ncols && BAD_TILE == 0)\n {\n #pragma omp critical\n BAD_TILE = 1;\n STARSH_WARNING(\"This was only tested on square tiles, error of \"\n \"approximation may be much higher, than demanded\");\n }\n int mn = nrows < ncols ? 
nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n size_t lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);\n size_t liwork = 8*mn2;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n //STARSH_PMALLOC(iwork, liwork, info);\n //STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n double time0 = omp_get_wtime();\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D);\n double time1 = omp_get_wtime();\n starsh_kernel_dna(nrows, ncols, D, far_U[lbi]->data,\n far_V[lbi]->data, far_rank+lbi, maxrank, oversample, tol, work,\n lwork, iwork);\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n // Free temporary arrays\n free(D);\n //free(work);\n //free(iwork);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": "ule(static)\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n far_rank[lbi] = -1;\n /*\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n size_t bi = block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n int i = block_far[2*bi];\n int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n if(nrows != ncols && BAD_TILE == 0)\n {\n #pragma omp critical\n BAD_TILE = 1;\n STARSH_WARNING(\"This was only tested on square tiles, error of \"\n \"approximation may be much higher, than demanded\");\n }\n int mn = nrows < ncols ? 
nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n size_t lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);\n size_t liwork = 8*mn2;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n //STARSH_PMALLOC(iwork, liwork, info);\n //STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n double time0 = omp_get_wtime();\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D);\n double time1 = omp_get_wtime();\n starsh_kernel_dna(nrows, ncols, D, far_U[lbi]->data,\n far_V[lbi]->data, far_rank+lbi, maxrank, oversample, tol, work,\n lwork, iwork);\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n // Free temporary arrays\n free(D);\n //free(work);\n //free(iwork);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "new_nblocks_near_local);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n #pragma omp parallel for schedule(static)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dna.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(lbi = 0; lbi < new_nblocks_near_local; lbi++)\n {\n STARSH_int bi = block_near_local[lbi];\n // Get 
indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n offset_D += nrows*ncols;\n //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n //offset_D += near_D[lbi]->size;\n }\n array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n #pragma omp critical\n kernel_time += time1-time0;\n\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int mn = nrows < ncols ? nrows : ncols;\n // Get size of temporary arrays\n int lmn = mn, lwork = (4*lmn+8+nrows+ncols)*lmn, liwork = 8*lmn;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n STARSH_PMALLOC(iwork, liwork, info);\n STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n\n starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,\n far_V[lbi]->data, ncols, far_rank+lbi, maxrank, tol, work,\n lwork, iwork);\n#ifdef OPENMP\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "new_nblocks_near_local);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n #pragma omp parallel for schedule(static)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = 
F->block_near[bi];\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(lbi = 0; lbi < new_nblocks_near_local; lbi++)\n {\n STARSH_int bi = block_near_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n offset_D += nrows*ncols;\n //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n //offset_D += near_D[lbi]->size;\n }\n array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n #pragma omp critical\n kernel_time += time1-time0;\n\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int mn = nrows < ncols ? 
nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n int lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);\n int liwork = 8*mn2;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n STARSH_PMALLOC(iwork, liwork, info);\n STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n\n starsh_dense_dlrrsdd(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,\n far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample,\n tol, work, lwork, iwork);\n#ifdef OPENMP\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "new_nblocks_near_local);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n #pragma omp parallel for schedule(static)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/drsdd.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(lbi = 0; lbi < 
new_nblocks_near_local; lbi++)\n {\n STARSH_int bi = block_near_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n offset_D += nrows*ncols;\n //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n //offset_D += near_D[lbi]->size;\n }\n array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n #pragma omp critical\n kernel_time += time1-time0;\n\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " }\n // Work variables\n int info;\n // Simple cycle over all far-field admissible blocks\n for(lbi = 0; lbi < nblocks_far_local; lbi++)\n {\n STARSH_int bi = block_far_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_far[2*bi];\n STARSH_int j = block_far[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int mn = nrows < ncols ? nrows : ncols;\n int mn2 = maxrank+oversample;\n if(mn2 > mn)\n mn2 = mn;\n // Get size of temporary arrays\n int lwork = 3*ncols+1, lwork_sdd = (4*(size_t)mn2+7)*mn2;\n if(lwork_sdd > lwork)\n lwork = lwork_sdd;\n lwork += (size_t)mn2*(2*ncols+mn2+1)+mn;\n int liwork = ncols, liwork_sdd = 8*mn2;\n if(liwork_sdd > liwork)\n liwork = liwork_sdd;\n double *D, *work;\n int *iwork;\n int info;\n // Allocate temporary arrays\n STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);\n STARSH_PMALLOC(iwork, liwork, info);\n STARSH_PMALLOC(work, lwork, info);\n // Compute elements of a block\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],\n RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n\n starsh_dense_dlrqp3(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,\n far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample,\n tol, work, lwork, iwork);\n#ifdef OPENMP\n double time2 = omp_get_wtime();\n #pragma omp critical\n {\n drsdd_time += time2-time1;\n kernel_time += time1-time0;\n }\n\n // Free temporary arrays\n free(D);\n free(work);\n free(iwork);\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "new_nblocks_near_local);\n // At first get all near-field blocks, assumed to be dense\n for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n #pragma omp parallel for schedule(static)\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } 
#pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " for(bi = 0; bi < 2*nblocks_near; bi++)\n block_near[bi] = F->block_near[bi];\n for(lbi = 0; lbi < nblocks_near_local; lbi++)\n block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n #pragma omp parallel for schedule(static)\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " block_near_local[lbi] = F->block_near_local[lbi];\n // Add false far-field blocks\n for(bi = 0; bi < nblocks_false_far; bi++)\n {\n STARSH_int bj = false_far[bi];\n block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];\n block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ecrc/stars-h/src/backends/mpi/blrm/dqp3.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 1)", "context_chars": 100, "text": " STARSH_MALLOC(alloc_D, size_D);\n // For each near-field block compute its elements\n for(lbi = 0; lbi < new_nblocks_near_local; lbi++)\n {\n STARSH_int bi = block_near_local[lbi];\n // Get indexes of corresponding block row and block column\n STARSH_int i = block_near[2*bi];\n STARSH_int j = block_near[2*bi+1];\n // Get corresponding sizes and minimum of them\n int nrows = RC->size[i];\n int ncols = CC->size[j];\n int shape[2] = {nrows, ncols};\n double *D;\n #pragma omp critical\n {\n D = alloc_D+offset_D;\n offset_D += nrows*ncols;\n //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n //offset_D += near_D[lbi]->size;\n }\n array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);\n#ifdef OPENMP\n double time0 = omp_get_wtime();\n\n kernel(nrows, ncols, RC->pivot+RC->start[i],\n CC->pivot+CC->start[j], RD, CD, D, nrows);\n#ifdef OPENMP\n double time1 = omp_get_wtime();\n #pragma omp critical\n kernel_time += time1-time0;\n\n } #pragma omp parallel for schedule(dynamic, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PanosAntoniadis/pps-ntua/Lab1/ex2/parallel/OpenMP/fws_parfor.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) shared(A, k, N)", "context_chars": 100, "text": "alloc(N*sizeof(int));\n\n\tgraph_init_random(A,-1,N,128*N);\n\n\tgettimeofday(&t1,0);\n\tfor(k=0;kfor(i=0; i #pragma omp parallel for private(i, j) shared(A, k, N)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PanosAntoniadis/pps-ntua/Lab1/ex1/parallel/GoL_p.c", "omp_pragma_line": "#pragma omp parallel for shared(N, previous, current) private(i, j, nbrs)", "context_chars": 100, "text": "r ( t = 0 ; t < T ; t++ ) {\n\t\t/* Use OpenMP parallel for in order to parallelize i and j loops */\n\t\tfor ( i = 1 ; i < N-1 ; i++ )\n\t\t\tfor ( j = 1 ; j < N-1 ; j++ ) {\n\t\t\t\tnbrs = previous[i+1][j+1] + previous[i+1][j] + previous[i+1][j-1] \\\n\t\t\t\t\t+ previous[i][j-1] + previous[i][j+1] \\\n\t\t\t\t\t+ previous[i-1][j-1] + previous[i-1][j] + 
previous[i-1][j+1];\n\t\t\t\tif ( nbrs == 3 || ( previous[i][j]+nbrs ==3 ) )\n\t\t\t\t\tcurrent[i][j]=1;\n\t\t\t\telse\n\t\t\t\t\tcurrent[i][j]=0;\n\t\t\t} #pragma omp parallel for shared(N, previous, current) private(i, j, nbrs)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " gcount(ngp);\n if(Rf_isNull(gs)) {\n for(int i = 0; i != l; ++i) ++gcount[g[i]];\n for(int i = 1; i < ngp; ++i) {\n // if(gcount[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gcount[i] > 0) {\n gmap[i] = std::vector (gcount[i]);\n gcount[i] = 0;\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "L);\n for(int i = 0; i != l; ++i) if(nisnan(x[i])) gmap[g[i]][gcount[g[i]]++] = x[i];\n for(int i = 1; i < ngp; ++i) {\n if(gcount[i] != 0) {\n int n = gcount[i], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n out[i-1] = (tiesmean && n%2 == 0) ? (*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n } else {\n gmap[g[i]][gcount[g[i]]++] = x[i];\n }\n }\n for(int i = 0; i < ng; ++i) {\n if(isnan2(out[i]) || gcount[i+1] == 0) continue;\n int n = gcount[i+1], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i+1].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n out[i] = (tiesmean && n%2 == 0) ? 
(*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "eads > col) nthreads = col;\n NumericVector out = no_init_vector(col);\n if(narm) {\n for(int j = 0; j < col; ++j) {\n NumericMatrix::ConstColumn colj = x( _ , j);\n NumericVector column = no_init_vector(l); // without multithreading this could be taken out of the loop, see previous version of the code.\n double *begin = column.begin(), *pend = std::remove_copy_if(colj.begin(), colj.end(), begin, isnan2);\n int sz = pend - begin, nth = lower ? (sz-1)*Q : sz*Q;\n if(sz == 0) {\n out[j] = colj[0];\n } else {\n std::nth_element(begin, begin+nth, pend);\n out[j] = (tiesmean && sz%2 == 0) ? (column[nth] + *(std::min_element(begin+nth+1, pend)))*0.5 : column[nth];\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " } else {\n int nth = lower ? (l-1)*Q : l*Q;\n bool tm = tiesmean && l%2 == 0;\n for(int j = 0; j < col; ++j) {\n {\n NumericMatrix::ConstColumn colj = x( _ , j);\n for(int i = 0; i != l; ++i) {\n if(isnan2(colj[i])) {\n out[j] = colj[i];\n goto endloop;\n }\n }\n NumericVector column = Rf_duplicate(wrap(colj)); // best ?\n std::nth_element(column.begin(), column.begin()+nth, column.end());\n out[j] = tm ? (column[nth] + *(std::min_element(column.begin()+nth+1, column.end())))*0.5 : column[nth];\n }\n endloop:;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " gcount(ngp);\n if(Rf_isNull(gs)) {\n for(int i = 0; i != l; ++i) ++gcount[g[i]];\n for(int i = 1; i < ngp; ++i) {\n // if(gcount[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gcount[i] > 0) gmap[i] = std::vector (gcount[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. 
Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "for(int i = 0; i != l; ++i) if(nisnan(column[i])) gmap[g[i]][gcount[g[i]]++] = column[i];\n for(int i = 1; i < ngp; ++i) {\n if(gcount[i] != 0) {\n int n = gcount[i], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n nthj[i-1] = (tiesmean && n%2 == 0) ? (*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " } else {\n gmap[g[i]][gcount[g[i]]++] = column[i];\n }\n }\n for(int i = 0; i < ng; ++i) {\n if(isnan2(nthj[i]) || gcount[i+1] == 0) continue;\n int n = gcount[i+1], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i+1].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n nthj[i] = (tiesmean && n%2 == 0) ? (*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "lement(begin+nth+1, pend)))*0.5 : column[nth];\n }\n }\n } else {\n for(int j = 0; j < l; ++j) out[j] = median_narm(x[j], lower, tiesmean, Q);\n }\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < l; ++j) out[j] = median_keepna(x[j], lower, tiesmean, Q);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "j = 0; j < l; ++j) out[j] = median_narm(x[j], lower, tiesmean, Q);\n }\n } else {\n for(int j = 0; j < l; ++j) out[j] = median_keepna(x[j], lower, tiesmean, Q);\n }\n if(drop) {\n Rf_setAttrib(out, R_NamesSymbol, Rf_getAttrib(x, R_NamesSymbol));\n return out;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "count(ngp);\n if(Rf_isNull(gs)) {\n for(int i = 0; i != lx1; ++i) ++gcount[g[i]];\n for(int i = 1; i < ngp; ++i) {\n // if(gcount[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. 
Use fdroplevels(f) to drop them.\");\n if(gcount[i] > 0) gmap[i] = std::vector (gcount[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "r(int i = 0; i != lx1; ++i) if(nisnan(column[i])) gmap[g[i]][gcount[g[i]]++] = column[i];\n for(int i = 1; i < ngp; ++i) {\n if(gcount[i] != 0) {\n int n = gcount[i], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n nthj[i-1] = (tiesmean && n%2 == 0) ? (*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.8.9 - 1.9.0/fnth_fmedian.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " } else {\n gmap[g[i]][gcount[g[i]]++] = column[i];\n }\n }\n for(int i = 0; i < ng; ++i) {\n if(isnan2(nthj[i]) || gcount[i+1] == 0) continue;\n int n = gcount[i+1], nth = lower ? (n-1)*Q : n*Q;\n auto begin = gmap[i+1].begin(), mid = begin + nth, end = begin + n;\n std::nth_element(begin, mid, end);\n nthj[i] = (tiesmean && n%2 == 0) ? (*(mid) + *(std::min_element(mid+1, end)))*0.5 : *(mid);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "\n }\n for(int i = 0; i != l; ++i) gmap[pg[i]][n[pg[i]]++] = x[i];\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n // const std::vector& temp = gmap[gr]; // wrap() // good ? // const Vector& // better for character strings\n sugar::IndexHash hash(wrap(gmap[gr+1])); // wrap(temp)\n int i = 0, s = hash.n, end = s-1, max = 1, index; // n[s+1] // fastest ? use n ?\n while(isnanT(hash.src[i]) && i!=end) ++i;\n out[gr] = hash.src[i]; // good\n if(i!=end) {\n std::vector n(s+1); // = no_init_vector // better for valgrind\n for( ; i != s; ++i) {\n storage_t val = hash.src[i];\n if(isnanT(val)) continue;\n addr = hash.get_addr(val);\n index = hash.data[addr];\n while(index && hash.not_equal(hash.src[index - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n index = hash.data[addr];\n }\n if(!index) {\n hash.data[addr] = i+1;\n ++hash.size_;\n n[i+1] = 1;\n if(nfirstm && max == 1) { // Could also do this at the end in a separate loop. What is faster ? 
-> This seems better !\n if(lastm) out[gr] = val;\n else if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n } else {\n // if(++n[hash.data[addr]] > max) { // good, or create int index\n // max = n[hash.data[addr]];\n // out[gr] = val;\n // }\n // index = hash.data[addr];\n if(++n[index] >= max) {\n if(lastm || n[index] > max) {\n max = n[index];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n // if(nfirstm && max == 1) { // Above seems better !\n // if(minm) {\n // for(int i = 1; i != s; ++i) if(out[gr] > hash.src[i]) out[gr] = hash.src[i];\n // } else {\n // for(int i = 1; i != s; ++i) if(out[gr] < hash.src[i]) out[gr] = hash.src[i];\n // }\n // }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "gr] = hash.src[i];\n // }\n // }\n }\n }\n } else {\n for(int gr = 0; gr < ng; ++gr) {\n // const std::vector& temp = gmap[gr]; // good ? // const Vector& // wrap()\n sugar::IndexHash hash(wrap(gmap[gr+1])); // wrap(temp)\n out[gr] = hash.src[0];\n int s = hash.n, max = 1, index; // n[s+1] // fastest ? use n ? and reset partially ?\n std::vector n(s+1); // = no_init_vector // better for valgrind\n for(int i = 0; i != s; ++i) {\n storage_t val = hash.src[i];\n addr = hash.get_addr(val);\n index = hash.data[addr];\n while(index && hash.not_equal(hash.src[index - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n index = hash.data[addr];\n }\n if(!index) {\n hash.data[addr] = i+1;\n ++hash.size_;\n n[i+1] = 1;\n if(nfirstm && max == 1) { // Could also do this at the end in a separate loop. What is faster ? -> This seems better !\n if(lastm) out[gr] = val;\n else if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n } else {\n // if(++n[hash.data[addr]] > max) { // good, or create int index\n // max = n[hash.data[addr]];\n // out[gr] = val;\n // }\n if(++n[index] >= max) {\n if(lastm || n[index] > max) {\n max = n[index];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n // if(nfirstm && max == 1) { // Above seems better !\n // if(minm) {\n // for(int i = 1; i != s; ++i) if(out[gr] > hash.src[i]) out[gr] = hash.src[i];\n // } else {\n // for(int i = 1; i != s; ++i) if(out[gr] < hash.src[i]) out[gr] = hash.src[i];\n // }\n // }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " gmap[gi][n[gi]] = x[i];\n wmap[gi][n[gi]++] = pwg[i];\n }\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n // const std::vector& temp = gmap[gr]; // good ? 
// const Vector& // wrap()\n const std::vector& wtemp = wmap[gr+1];\n sugar::IndexHash hash(wrap(gmap[gr+1])); // wrap(temp)\n int i = 0, s = hash.n, end = s-1, index;\n double max = DBL_MIN; // n[s+1]\n while((isnanT(hash.src[i]) || std::isnan(wtemp[i])) && i!=end) ++i;\n out[gr] = hash.src[i]; // good !\n if(i!=end) {\n std::vector n(s+1); // = no_init_vector // better for valgrind\n for( ; i != s; ++i) {\n storage_t val = hash.src[i];\n if(isnanT(val) || std::isnan(wtemp[i])) continue;\n addr = hash.get_addr(val);\n index = hash.data[addr];\n while(index && hash.not_equal(hash.src[index - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n index = hash.data[addr];\n }\n if(!index) {\n hash.data[addr] = i+1;\n ++hash.size_;\n n[i+1] = wtemp[i];\n if(wtemp[i] >= max) { // necessary, because second loop only entered for more than one occurrence of the same value\n if(lastm || wtemp[i] > max) {\n max = wtemp[i];\n out[gr] = val;\n } else if(nfirstm) { // Could also do this at the end in a separate loop. What is faster ??\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n } else {\n n[index] += wtemp[i];\n // if(n[index] > max) {\n // max = n[index];\n // out[gr] = val;\n // }\n if(n[index] >= max) {\n if(lastm || n[index] > max) {\n max = n[index];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n } else {\n for(int gr = 0; gr < ng; ++gr) {\n // const std::vector& temp = gmap[gr]; // good ? // const Vector& // wrap()\n const std::vector& wtemp = wmap[gr+1];\n sugar::IndexHash hash(wrap(gmap[gr+1])); // wrap(temp)\n out[gr] = hash.src[0];\n int s = hash.n, index; // fastest ? use n ? and reset partially ?\n double max = DBL_MIN; // n[s+1];\n std::vector n(s+1); // = no_init_vector // better for valgrind\n for(int i = 0; i != s; ++i) {\n if(std::isnan(wtemp[i])) continue;\n storage_t val = hash.src[i];\n addr = hash.get_addr(val);\n index = hash.data[addr];\n while(index && hash.not_equal(hash.src[index - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n index = hash.data[addr];\n }\n if(!index) {\n hash.data[addr] = i+1;\n ++hash.size_;\n n[i+1] = wtemp[i];\n if(wtemp[i] >= max) { // necessary, because second loop only entered for more than one occurrence of the same value\n if(lastm || wtemp[i] > max) {\n max = wtemp[i];\n out[gr] = val;\n } else if(nfirstm) { // Could also do this at the end in a separate loop. 
What is faster ??\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n } else {\n n[index] += wtemp[i];\n // if(n[index] > max) {\n // max = n[index];\n // out[gr] = val;\n // }\n if(n[index] >= max) {\n if(lastm || n[index] > max) {\n max = n[index];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "\n }\n for(int i = 0; i != l; ++i) gmap[pg[i]][n[pg[i]]++] = x[i];\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n const std::vector& temp = gmap[gr+1];\n int i = 0, s = temp.size(), end = s-1, max = 1;\n while(temp[i] == NA_INTEGER && i!=end) ++i;\n out[gr] = temp[i];\n if(i!=end) {\n std::vector n(nlevp);\n for( ; i != s; ++i) {\n val = temp[i];\n if(val == NA_INTEGER) continue;\n if(++n[val] >= max) {\n if(lastm || n[val] > max) {\n max = n[val];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n } else {\n for(int gr = 0; gr < ng; ++gr) {\n const std::vector& temp = gmap[gr+1];\n int tl = temp.size(), max = 1;\n std::vector n(nlevp);\n out[gr] = temp[0];\n for(int i = 0; i != tl; ++i) {\n val = temp[i];\n if(val == NA_INTEGER) val = 0;\n if(++n[val] >= max) {\n if(lastm || n[val] > max) {\n max = n[val];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n if(out[gr] == 0) out[gr] = NA_INTEGER;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " gmap[gi][n[gi]] = x[i];\n wmap[gi][n[gi]++] = pwg[i];\n }\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n const std::vector& temp = gmap[gr+1];\n const std::vector& wtemp = wmap[gr+1];\n int i = 0, s = temp.size(), end = s-1;\n double max = DBL_MIN;\n while((temp[i] == NA_INTEGER || std::isnan(wtemp[i])) && i!=end) ++i;\n out[gr] = temp[i];\n if(i!=end) {\n std::vector n(nlevp);\n for( ; i != s; ++i) {\n val = temp[i];\n if(val == NA_INTEGER || std::isnan(wtemp[i])) continue;\n n[val] += wtemp[i];\n if(n[val] >= max) {\n if(lastm || n[val] > max) {\n max = n[val];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n } else {\n for(int gr = 
0; gr < ng; ++gr) {\n const std::vector& temp = gmap[gr+1];\n const std::vector& wtemp = wmap[gr+1];\n int tl = temp.size();\n double max = DBL_MIN;\n std::vector n(nlevp);\n out[gr] = temp[0];\n for(int i = 0; i != tl; ++i) {\n if(std::isnan(wtemp[i])) continue;\n val = temp[i];\n if(val == NA_INTEGER) val = 0;\n n[val] += wtemp[i];\n if(n[val] >= max) {\n if(lastm || n[val] > max) {\n max = n[val];\n out[gr] = val;\n } else if(nfirstm) {\n if(minm) {\n if(out[gr] > val) out[gr] = val;\n } else {\n if(out[gr] < val) out[gr] = val;\n }\n }\n }\n }\n if(out[gr] == 0) out[gr] = NA_INTEGER;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " = x.size();\n List out(l);\n\n if(ng == 0 && nthreads > 1) {\n if(nthreads > l) nthreads = l;\n for(int j = 0; j < l; ++j) out[j] = fmodeCpp(x[j], ng, g, gs, w, narm, ret, 1);\n } else {\n for(int j = l; j--; ) out[j] = fmodeCpp(x[j], ng, g, gs, w, narm, ret, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fmode.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "o_init_matrix(ng, col);\n\n if(ng == 0 && nthreads > 1) {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) out(_, j) = fmodeImpl(x(_, j), ng, g, gs, w, narm, ret, 1);\n } else {\n for(int j = col; j--; ) out(_, j) = fmodeImpl(x(_, j), ng, g, gs, w, narm, ret, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "tm1 = out.begin()-1;\n if(Rf_isNull(gs)) {\n for(int i = 0; i != l; ++i) ++outm1[g[i]];\n for(int i = 0; i < ng; ++i) {\n // if(out[i] == 0) stop(\"Group size of 0 encountered. This is probably due to unused factor levels. Use fdroplevels(f) to drop them.\");\n if(out[i] > 0) {\n gmap[i+1] = std::vector (out[i]);\n out[i] = 0;\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " {\n IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably due to unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n }\n for(int i = 0; i != l; ++i) gmap[g[i]][outm1[g[i]]++] = x[i];\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n if(out[gr] == 0) continue;\n // const std::vector& temp = gmap[gr+1]; // good ? 
// const Vector& // wrap()\n sugar::IndexHash hash(wrap(gmap[gr+1])); // temp\n for(int i = hash.n; i--; ) {\n storage_t val = hash.src[i];\n if(isnanT(val)) continue;\n addr = hash.get_addr(val);\n while(hash.data[addr] && hash.not_equal(hash.src[hash.data[addr] - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n }\n if(!hash.data[addr]) {\n hash.data[addr] = i+1;\n ++hash.size_;\n }\n }\n out[gr] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " ++hash.size_;\n }\n }\n out[gr] = hash.size_;\n }\n } else {\n for(int gr = 0; gr < ng; ++gr) {\n if(out[gr] == 0) continue;\n sugar::IndexHash hash(wrap(gmap[gr+1]));\n hash.fill();\n out[gr] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "out.begin()-1;\n if(Rf_isNull(gs)) {\n for(int i = 0; i != l; ++i) ++outm1[g[i]];\n for(int i = 0; i < ng; ++i) {\n // if(out[i] == 0) stop(\"Group size of 0 encountered. This is probably due to unused factor levels. Use fdroplevels(f) to drop them.\");\n if(out[i] > 0) {\n gmap[i+1] = std::vector (out[i]);\n out[i] = 0;\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably due to unused factor levels. Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n for(int i = 0; i != l; ++i) gmap[g[i]][outm1[g[i]]++] = x[i];\n if(narm) {\n for(int gr = 0; gr < ng; ++gr) {\n if(out[gr] == 0) continue;\n const std::vector& temp = gmap[gr+1];\n n = 1;\n std::vector uxp(nlevp, true);\n for(int i = temp.size(); i--; ) {\n if(temp[i] != NA_INTEGER && uxp[temp[i]]) { // save xi = x[i] ? Faster ?\n uxp[temp[i]] = false;\n if(++n == nlevp) break;\n }\n }\n out[gr] = n - 1;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "nlevp) break;\n }\n }\n out[gr] = n - 1;\n }\n } else {\n for(int gr = 0; gr < ng; ++gr) {\n if(out[gr] == 0) continue;\n const std::vector& temp = gmap[gr+1];\n bool anyNA = false;\n n = 1;\n std::vector uxp(nlevp, true);\n for(int i = temp.size(); i--; ) {\n if(temp[i] == NA_INTEGER) {\n anyNA = true;\n continue;\n }\n if(uxp[temp[i]]) { // save xi = x[i] ? 
Faster ?\n uxp[temp[i]] = false;\n if(++n == nlevp && anyNA) break;\n }\n }\n out[gr] = n + anyNA - 1;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " > 1 && ((ng == 0 && l > 1) || (ng != 0 && nthreads < l))) {\n if(nthreads > l) nthreads = l;\n for(int j = 0; j < l; ++j) out[j] = fndistinctCpp(x[j], ng, g, gs, narm, 1);\n } else {\n for(int j = l; j--; ) out[j] = fndistinctCpp(x[j], ng, g, gs, narm, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ntegerVector out = no_init_vector(col);\n if(nthreads > col) nthreads = col;\n if(narm) {\n for(int j = 0; j < col; ++j) {\n // ConstMatrixColumn column = x(_ , j);\n sugar::IndexHash hash(wrap(x(_ , j))); // wrap(column) // why wrap needed ?\n for(int i = 0; i != l; ++i) {\n storage_t val = hash.src[i];\n if(isnanT(val)) continue;\n addr = hash.get_addr(val);\n while(hash.data[addr] && hash.not_equal(hash.src[hash.data[addr] - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n }\n if(!hash.data[addr]) {\n hash.data[addr] = i+1;\n ++hash.size_;\n }\n }\n out[j] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " ++hash.size_;\n }\n }\n out[j] = hash.size_;\n }\n } else {\n for(int j = 0; j < col; ++j) {\n // ConstMatrixColumn column = x(_ , j);\n sugar::IndexHash hash(wrap(x(_ , j))); // wrap(column) // why wrap needed ?\n hash.fill();\n out[j] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ll(gs)) {\n // memset(n, 0, sizeof(int)*ng);\n for(int i = 0; i != l; ++i) ++n[g[i]];\n for(int i = 1; i < ngp; ++i) {\n // if(n[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. Use fdroplevels(f) to drop them.\");\n if(n[i] > 0) gmap[i] = std::vector (n[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " {\n IntegerVector gsv = gs;\n if(ng != gsv.size()) stop(\"ng must match length(gs)\");\n for(int i = 0; i < ng; ++i) {\n // if(gsv[i] == 0) stop(\"Group size of 0 encountered. This is probably because of unused factor levels. 
Use fdroplevels(f) to drop them.\");\n if(gsv[i] > 0) gmap[i+1] = std::vector (gsv[i]);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " i != l; ++i) gmap[g[i]][n[g[i]]++] = column[i]; // reading in all the values. Better way ?\n for(int gr = 0; gr < ng; ++gr) {\n if(n[gr+1] == 0) continue;\n // const std::vector& temp = gmap[gr+1]; // good ? // const Vector& // wrap()\n sugar::IndexHash hash(wrap(gmap[gr+1])); // wrap(temp)\n for(int i = hash.n; i--; ) {\n storage_t val = hash.src[i];\n if(isnanT(val)) continue;\n addr = hash.get_addr(val);\n while(hash.data[addr] && hash.not_equal(hash.src[hash.data[addr] - 1], val)) {\n ++addr;\n if(addr == static_cast(hash.m)) addr = 0;\n }\n if(!hash.data[addr]) {\n hash.data[addr] = i+1;\n ++hash.size_;\n }\n }\n outj[gr] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.7.6 - 1.8.0/fndistinct.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " i != l; ++i) gmap[g[i]][n[g[i]]++] = column[i]; // reading in all the values. Better way ?\n for(int gr = 0; gr < ng; ++gr) {\n if(n[gr+1] == 0) continue;\n sugar::IndexHash hash(wrap(gmap[gr+1]));\n hash.fill();\n outj[gr] = hash.size_;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "ing range_str\"); // # nocov\n // savetl_init() has already been called at the start of forder\n // for(int i=0; i=0) { // another thread may have set it while I was waiting, so check it again\n if (TRUELENGTH(s)>0) // save any of R's own usage of tl (assumed positive, so we can both count and save in one scan), to restore\n savetl(s); // afterwards. From R 2.14.0, tl is initialized to 0, prior to that it was random so this step saved too much.\n // now save unique SEXP in ustr so i) we can loop through them afterwards and reset TRUELENGTH to 0 and ii) sort uniques when sorting too\n if (ustr_alloc<=ustr_n) {\n ustr_alloc = (ustr_alloc==0) ? 16384 : ustr_alloc*2; // small initial guess, negligible time to alloc 128KB (32 pages)\n if (ustr_alloc>n) ustr_alloc = n; // clamp at n. Reaches n when fully unique (no dups)\n ustr = realloc(ustr, ustr_alloc * sizeof(SEXP));\n if (ustr==NULL) STOP(\"Unable to realloc %d * %d bytes in range_str\", ustr_alloc, (int)sizeof(SEXP)); // # nocov\n }\n ustr[ustr_n++] = s;\n SET_TRUELENGTH(s, -ustr_n); // unique in any order is fine. 
first-appearance order is achieved later in count_group\n if (LENGTH(s)>ustr_maxlen) ustr_maxlen=LENGTH(s);\n if (!anyneedutf8 && NEED2UTF8(s)) anyneedutf8=true;\n }\n } #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": " SEXP ans = PROTECT(allocVector(INTSXP, nrow)); n_protect++;\n anso = INTEGER(ans);\n TEND(0)\n // for (int i=0; i0)\n int spare=0; // the amount of bits remaining on the right of the current nradix byte\n bool isReal=false;\n bool complexRerun = false; // see comments below in CPLXSXP case\n SEXP CplxPart = R_NilValue;\n if (n_cplx) { CplxPart=PROTECT(allocVector(REALSXP, nrow)); n_protect++; } #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "\n switch(TYPEOF(x)) {\n case INTSXP : case LGLSXP : {\n int32_t *xd = INTEGER(x);\n // for (int i=0; i #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "EALSXP :\n if (inherits(x, \"integer64\")) {\n int64_t *xd = (int64_t *)REAL(x);\n // for (int i=0; i #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "ion (skip bytes/mult by 10,100 etc) as currently it's often 6-8 bytes even for 3.14,3.15\n // for (int i=0; i #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "ITE_KEY\n }\n }\n break;\n case STRSXP : {\n SEXP *xd = STRING_PTR(x);\n // for (int i=0; i #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": " anso will contain 0's for the NAs and will be considered not-sorted.\n bool stop = false;\n // for (int i=0; i #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "if (!TMP) STOP(\"Unable to allocate TMP for my_n=%d items in parallel batch counting\", my_n);\n // for (int batch=0; batch #pragma omp parallel for num_threads(getDTthreads())"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": "int currently (in future 8). To save team startup cost (but unlikely significant anyway)\n // for (int batch=0; batch #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(dynamic) num_threads(getDTthreads())", "context_chars": 100, "text": "o start a parallel team and there will be no nestedness here either.\n if (retgrp) {\n // for (int i=0; i #pragma omp parallel for ordered schedule(dynamic) num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/misc/legacy/sorted out 1.1.0 - 1.2.0/data.table_forder.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) num_threads(getDTthreads())", "context_chars": 100, "text": " // flush() is only relevant when retgrp==true so save the redundant ordered clause\n // for (int i=0; i #pragma omp parallel for schedule(dynamic) num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "tithreaded...\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_double(px + pst[gr], po, pgs[gr], 1, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int(px + pst[gr], po, pgs[gr], 1, narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int(px + pst[gr], po, pgs[gr], 1, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } else { // Not sorted. 
Perhaps reordering x is faster?\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_double(px, po + pst[gr], pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int(px, po + pst[gr], pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ng x is faster?\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_double(px, po + pst[gr], pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int(px, po + pst[gr], pgs[gr], 0, narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int(px, po + pst[gr], pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\n// Expects pointer po to be decremented by 1\nSEXP nth_g_impl_noalloc(SEXP x, int ng, int *pgs, int *po, int *pst, int sorted, int narm, int ret, double Q, void* x_cc) {\n\n SEXP res = PROTECT(allocVector(REALSXP, ng));\n double *pres = REAL(res);\n\n if(sorted) {\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n for(int gr = 0; gr != ng; ++gr) pres[gr] = nth_double_noalloc(px + pst[gr], po, x_cc, pgs[gr], 1, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n for(int gr = 0; gr != ng; ++gr) pres[gr] = nth_int_noalloc(px + pst[gr], po, x_cc, pgs[gr], 1, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } else {\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n for(int gr = 0; gr != ng; ++gr) pres[gr] = nth_double_noalloc(px, po + pst[gr], x_cc, pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n for(int gr = 0; gr != ng; ++gr) pres[gr] = nth_int_noalloc(px, po + pst[gr], x_cc, pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "e *pres = REAL(res);\n\n switch(TYPEOF(x)) {\n case 
REALSXP: {\n double *px = REAL(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_double_ord(px, po + pst[gr], pgs[gr], narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int_ord(px, po + pst[gr], pgs[gr], narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = nth_int_ord(px, po + pst[gr], pgs[gr], narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\n// Expects pointers pw and po to be decremented by 1\nSEXP w_nth_g_ord_impl(SEXP x, double *pw, int ng, int *pgs, int *po, int *pst, int narm, int ret, double Q, int nthreads) {\n\n if(nthreads > ng) nthreads = ng;\n\n SEXP res = PROTECT(allocVector(REALSXP, ng));\n double *pres = REAL(res);\n\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_ord(px, pw, po + pst[gr], DBL_MIN, pgs[gr], narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_ord(px, pw, po + pst[gr], DBL_MIN, pgs[gr], narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "e *pres = REAL(res);\n\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_ord(px, pw, po + pst[gr], DBL_MIN, pgs[gr], narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_ord(px, pw, po + pst[gr], DBL_MIN, pgs[gr], narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_ord(px, pw, po + pst[gr], DBL_MIN, pgs[gr], narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\n// Expects pointers pw and po to be decremented by 1\nSEXP w_nth_g_qsort_impl(SEXP x, double *pw, 
int ng, int *pgs, int *po, int *pst, int sorted, int narm, int ret, double Q, int nthreads) {\n\n if(nthreads > ng) nthreads = ng;\n\n SEXP res = PROTECT(allocVector(REALSXP, ng));\n double *pres = REAL(res);\n\n if(sorted) { // sorted by groups: need to offset both px and pw\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_qsort(px + pst[gr], pw + pst[gr], po, DBL_MIN, pgs[gr], 1, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px + pst[gr], pw + pst[gr], po, DBL_MIN, pgs[gr], 1, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } else {\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "oth px and pw\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_qsort(px + pst[gr], pw + pst[gr], po, DBL_MIN, pgs[gr], 1, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px + pst[gr], pw + pst[gr], po, DBL_MIN, pgs[gr], 1, narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x)-1;\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px + pst[gr], pw + pst[gr], po, DBL_MIN, pgs[gr], 1, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } else {\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n } else {\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_double_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case INTSXP:\n case LGLSXP: {\n int *px = INTEGER(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = w_nth_int_qsort(px, pw, po + pst[gr], DBL_MIN, pgs[gr], 0, narm, ret, Q);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(TYPEOF(x)));\n }\n }\n\n if(ATTRIB(x) != R_NilValue && !(isObject(x) && inherits(x, \"ts\"))) copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\n\n\n// Functions for Export --------------------------------------------------------\n\nint Rties2int(SEXP x) {\n int tx = TYPEOF(x);\n if(tx == INTSXP || tx == REALSXP || tx == LGLSXP) {\n int ret = asInteger(x);\n if(ret < 1 || ret > 9 || ret == 4) error(\"ties must be 1, 2, 3 or 5-9, you supplied: %d\", ret);\n return ret;\n }\n if(tx != STRSXP) error(\"ties must be integer or character\");\n const char * r = CHAR(STRING_ELT(x, 0)); // translateCharUTF8()\n if(strcmp(r, \"mean\") == 0) return 1;\n if(strcmp(r, \"min\") == 0) return 2;\n if(strcmp(r, \"max\") == 0) return 3;\n if(strcmp(r, \"q5\") == 0) return 5;\n if(strcmp(r, \"q6\") == 0) return 6;\n if(strcmp(r, \"q7\") == 0) return 7;\n if(strcmp(r, \"q8\") == 0) return 8;\n if(strcmp(r, \"q9\") == 0) return 9;\n error(\"Unknown ties option: %s\", r);\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fnth_fmedian_fquantile.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " \\\n}\n/* Multithreading: does not work with radixorder\n * } else {\n for(int j = 0; j < l; ++j) {\n int *pxo = (int *) Calloc(nrx, int);\n // num1radixsort(pxo, TRUE, FALSE, px[j]); // Probably cannot be parallelized, can try R_orderVector1()\n // R_orderVector1(pxo, nrx, px[j], TRUE, FALSE); // Also not thread safe, and also 0-indexed.\n // for(int i = 0; i < nrx; ++i) pxo[i] += 1;\n pout[j] = w_nth_ord_impl_dbl(px[j], pxo, pw, narm, ret, Q, h);\n Free(pxo);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": " int j = 1;\n sum = px[0];\n while(ISNAN(sum) && j != l) sum = px[j++];\n if(j != l) {\n for(int i = j; i < l; ++i) sum += NISNAN(px[i]) ? 
px[i] : 0.0;\n } else if(narm == 2) sum = 0.0;\n } else {\n sum = 0;\n #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)\n for(int i = 0; i < l; ++i) sum += px[i]; // Cannot have break statements in OpenMP for loop\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": "sum += NISNAN(px[i]) ? px[i] : 0.0;\n } else if(narm == 2) sum = 0.0;\n } else {\n sum = 0;\n for(int i = 0; i < l; ++i) sum += px[i]; // Cannot have break statements in OpenMP for loop\n }\n return sum;\n}\n\n// This is unsafe...\n// void fsum_double_g_omp_impl(double *restrict pout, double *restrict px, int ng, int *restrict pg, int narm, int l, int nthreads) {\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_REAL;\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])\n// for(int i = 0; i < l; ++i) {\n// if(!ISNAN(px[i])) {\n// if(ISNAN(pout[pg[i]-1])) pout[pg[i]-1] = px[i];\n// else pout[pg[i]-1] += px[i];\n// }\n// }\n// } else {\n// memset(pout, 0, sizeof(double) * ng);\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng]) // shared(pout)\n// for(int i = 0; i < l; ++i) {\n// // #pragma omp atomic\n// pout[pg[i]-1] += px[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// }\n// }\n// } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])", "context_chars": 100, "text": "arm, int l, int nthreads) {\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_REAL;\n// // for(int i = 0; i < l; ++i) {\n// if(!ISNAN(px[i])) {\n// if(ISNAN(pout[pg[i]-1])) pout[pg[i]-1] = px[i];\n// else pout[pg[i]-1] += px[i];\n// }\n// } #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng]) ", "context_chars": 100, "text": "-1] += px[i];\n// }\n// }\n// } else {\n// memset(pout, 0, sizeof(double) * ng);\n// // for(int i = 0; i < l; ++i) {\n// // #pragma omp atomic\n// pout[pg[i]-1] += px[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// } #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng]) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": "hile(j!=l && (ISNAN(px[j]) || ISNAN(pw[j]))) ++j;\n if(j != l) {\n sum = px[j] * pw[j];\n for(int i = j+1; i < l; ++i) sum += (NISNAN(px[i]) && NISNAN(pw[i])) ? px[i] * pw[i] : 0.0;\n } else sum = narm == 1 ? 
NA_REAL : 0.0;\n } else {\n sum = 0;\n #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)\n for(int i = 0; i < l; ++i) sum += px[i] * pw[i];\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": "i])) ? px[i] * pw[i] : 0.0;\n } else sum = narm == 1 ? NA_REAL : 0.0;\n } else {\n sum = 0;\n for(int i = 0; i < l; ++i) sum += px[i] * pw[i];\n }\n return sum;\n}\n\n// This is unsafe...\n// void fsum_weights_g_omp_impl(double *restrict pout, double *restrict px, int ng, int *restrict pg, double *restrict pw, int narm, int l, int nthreads) {\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_REAL;\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])\n// for(int i = 0; i < l; ++i) {\n// if(ISNAN(px[i]) || ISNAN(pw[i])) continue;\n// if(ISNAN(pout[pg[i]-1])) pout[pg[i]-1] = px[i] * pw[i];\n// else pout[pg[i]-1] += px[i] * pw[i];\n// }\n// } else {\n// memset(pout, 0, sizeof(double) * ng);\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])\n// for(int i = 0; i < l; ++i) pout[pg[i]-1] += px[i] * pw[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// }\n// } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])", "context_chars": 100, "text": "arm, int l, int nthreads) {\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_REAL;\n// // for(int i = 0; i < l; ++i) {\n// if(ISNAN(px[i]) || ISNAN(pw[i])) continue;\n// if(ISNAN(pout[pg[i]-1])) pout[pg[i]-1] = px[i] * pw[i];\n// else pout[pg[i]-1] += px[i] * pw[i];\n// } #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])", "context_chars": 100, "text": "[i]-1] += px[i] * pw[i];\n// }\n// } else {\n// memset(pout, 0, sizeof(double) * ng);\n// // for(int i = 0; i < l; ++i) pout[pg[i]-1] += px[i] * pw[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// }\n// }\n\n\n// using long long internally is substantially faster than using doubles !!\ndouble fsum_int_impl(const int *restrict px, const int narm, const int l) {\n long long sum;\n if(narm) {\n int j = l-1;\n while(px[j] == NA_INTEGER && j!=0) --j;\n sum = (long long)px[j];\n if(j == 0 && px[j] == NA_INTEGER) return narm == 1 ? 
NA_REAL : 0;\n for(int i = j; i--; ) if(px[i] != NA_INTEGER) sum += (long long)px[i];\n } else {\n sum = 0;\n for(int i = 0; i != l; ++i) {\n if(px[i] == NA_INTEGER) return NA_REAL; // Need this, otherwise result is incorrect !!\n sum += (long long)px[i];\n }\n }\n return (double)sum;\n} #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": "if(j == l && px[j-1] == NA_INTEGER) return narm == 1 ? NA_REAL : 0;\n sum = (long long)px[j];\n for(int i = j+1; i < l; ++i) sum += px[i] != NA_INTEGER ? (long long)px[i] : 0;\n } else {\n if(px[0] == NA_INTEGER || px[l-1] == NA_INTEGER) return NA_REAL;\n sum = 0;\n #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)\n for(int i = 0; i < l; ++i) sum += (long long)px[i]; // Need this, else wrong result\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)", "context_chars": 100, "text": "0;\n } else {\n if(px[0] == NA_INTEGER || px[l-1] == NA_INTEGER) return NA_REAL;\n sum = 0;\n for(int i = 0; i < l; ++i) sum += (long long)px[i]; // Need this, else wrong result\n }\n return (double)sum;\n}\n\n// This is unsafe...\n// void fsum_int_g_omp_impl(int *restrict pout, int *restrict px, int ng, int *restrict pg, int narm, int l, int nthreads) {\n// long long ckof;\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_INTEGER;\n// int lsi;\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])\n// for(int i = 0; i < l; ++i) {\n// if(px[i] != NA_INTEGER) {\n// lsi = pout[pg[i]-1];\n// if(lsi == NA_INTEGER) pout[pg[i]-1] = px[i];\n// else {\n// ckof = (long long)lsi + px[i];\n// if(ckof > INT_MAX || ckof <= INT_MIN) error(\"Integer overflow in one or more groups. Integers in R are bounded between 2,147,483,647 and -2,147,483,647. The sum within each group should be in that range.\");\n// pout[pg[i]-1] = (int)ckof;\n// }\n// }\n// }\n// } else {\n// memset(pout, 0, sizeof(int) * ng);\n// int lsi;\n// #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])\n// for(int i = 0; i < l; ++i) {\n// if(px[i] == NA_INTEGER) {\n// pout[pg[i]-1] = NA_INTEGER;\n// continue;\n// }\n// lsi = pout[pg[i]-1];\n// if(lsi != NA_INTEGER) { // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// ckof = (long long)lsi + px[i];\n// if(ckof > INT_MAX || ckof <= INT_MIN) error(\"Integer overflow in one or more groups. Integers in R are bounded between 2,147,483,647 and -2,147,483,647. 
The sum within each group should be in that range.\");\n// pout[pg[i]-1] = (int)ckof;\n// }\n// }\n// }\n// } #pragma omp parallel for simd num_threads(nthreads) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])", "context_chars": 100, "text": "ng ckof;\n// if(narm) {\n// for(int i = ng; i--; ) pout[i] = NA_INTEGER;\n// int lsi;\n// // for(int i = 0; i < l; ++i) {\n// if(px[i] != NA_INTEGER) {\n// lsi = pout[pg[i]-1];\n// if(lsi == NA_INTEGER) pout[pg[i]-1] = px[i];\n// else {\n// ckof = (long long)lsi + px[i];\n// if(ckof > INT_MAX || ckof <= INT_MIN) error(\"Integer overflow in one or more groups. Integers in R are bounded between 2,147,483,647 and -2,147,483,647. The sum within each group should be in that range.\");\n// pout[pg[i]-1] = (int)ckof;\n// }\n// }\n// } #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])", "context_chars": 100, "text": "\n// }\n// }\n// } else {\n// memset(pout, 0, sizeof(int) * ng);\n// int lsi;\n// // for(int i = 0; i < l; ++i) {\n// if(px[i] == NA_INTEGER) {\n// pout[pg[i]-1] = NA_INTEGER;\n// continue;\n// }\n// lsi = pout[pg[i]-1];\n// if(lsi != NA_INTEGER) { // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n// ckof = (long long)lsi + px[i];\n// if(ckof > INT_MAX || ckof <= INT_MIN) error(\"Integer overflow in one or more groups. Integers in R are bounded between 2,147,483,647 and -2,147,483,647. 
The sum within each group should be in that range.\");\n// pout[pg[i]-1] = (int)ckof;\n// }\n// } #pragma omp parallel for num_threads(nthreads) reduction(+:pout[:ng])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": ") pout[j] = fsum_double_impl(px + j*l, narm, l);\n } else if(col >= nthreads) {\n for(int j = 0; j < col; ++j) pout[j] = fsum_double_impl(px + j*l, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fsum_double_omp_impl(px + j*l, narm, l, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "*l, ng, pg, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fsum_double_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n }\n }\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x);\n if(ng > 0) {\n int *pout = INTEGER(out);\n if(nthreads <= 1 || col == 1) {\n for(int j = 0; j != col; ++j) fsum_int_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) fsum_int_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n }\n } else {\n double *restrict pout = REAL(out);\n int anyoutl = 0;\n if(nthreads <= 1) {\n for(int j = 0; j != col; ++j) {\n double sumj = fsum_int_impl(px + j*l, narm, l);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n } else if(col >= nthreads) { // If high-dimensional: column-level parallelism\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n double sumj = fsum_int_impl(px + j*l, narm, l);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n } else {\n for(int j = 0; j != col; ++j) {\n double sumj = fsum_int_omp_impl(px + j*l, narm, l, nthreads);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n }\n if(anyoutl == 0) {\n out = PROTECT(coerceVector(out, INTSXP));\n matCopyAttr(out, x, Rdrop, ng);\n UNPROTECT(nprotect + 1);\n return out;\n }\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "*l, ng, pg, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fsum_int_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n }\n } else {\n double *restrict pout = REAL(out);\n int anyoutl = 0;\n if(nthreads <= 1) {\n for(int j = 0; j != col; ++j) {\n double sumj = fsum_int_impl(px + j*l, narm, l);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n } else if(col >= nthreads) { // If high-dimensional: column-level parallelism\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n double sumj = fsum_int_impl(px + j*l, narm, l);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n } else {\n for(int j = 0; j != col; ++j) {\n double sumj = fsum_int_omp_impl(px + j*l, narm, l, nthreads);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n }\n }\n if(anyoutl == 0) {\n out = PROTECT(coerceVector(out, 
INTSXP));\n matCopyAttr(out, x, Rdrop, ng);\n UNPROTECT(nprotect + 1);\n return out;\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " } else if(col >= nthreads) { // If high-dimensional: column-level parallelism\n for(int j = 0; j < col; ++j) {\n double sumj = fsum_int_impl(px + j*l, narm, l);\n if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n pout[j] = sumj;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "++j) pout[j] = fsum_weights_impl(px + j*l, pw, narm, l);\n } else if(col >= nthreads) {\n for(int j = 0; j < col; ++j) pout[j] = fsum_weights_impl(px + j*l, pw, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fsum_weights_omp_impl(px + j*l, pw, narm, l, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": ", px + j*l, ng, pg, pw, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fsum_weights_g_impl(pout + j*ng, px + j*l, ng, pg, pw, narm, l);\n }\n }\n }\n matCopyAttr(out, x, Rdrop, ng);\n UNPROTECT(nprotect);\n return out;\n}\n\n// For safe multithreading across data frame columns\n\ndouble fsum_impl_dbl(SEXP x, int narm, int nthreads) {\n int l = length(x);\n if(l < 1) return NA_REAL;\n if(nthreads <= 1) switch(TYPEOF(x)) {\n case REALSXP: return fsum_double_impl(REAL(x), narm, l);\n case LGLSXP:\n case INTSXP: return fsum_int_impl(INTEGER(x), narm, l);\n default: error(\"Unsupported SEXP type: '%s'\", type2char(TYPEOF(x)));\n }\n switch(TYPEOF(x)) {\n case REALSXP: return fsum_double_omp_impl(REAL(x), narm, l, nthreads);\n case LGLSXP:\n case INTSXP: return fsum_int_omp_impl(INTEGER(x), narm, l, nthreads);\n default: error(\"Unsupported SEXP type: '%s'\", type2char(TYPEOF(x)));\n }\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "!= R_NilValue && !(isObject(xj) && inherits(xj, \"ts\"))) copyMostAttrib(xj, outj);\n }\n for(int j = 0; j < l; ++j) fsum_g_omp_impl(px[j], DATAPTR(pout[j]), ng, pg, narm);\n } else {\n for(int j = 0; j != l; ++j) pout[j] = fsum_g_impl(px[j], ng, pg, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "up = 1;}\n SET_VECTOR_ELT(x, j, coerceVector(xj, REALSXP));\n }\n }\n for(int j = 0; j < l; ++j) fsum_weights_g_impl(REAL(pout[j]), REAL(px[j]), ng, pg, pw, narm, nrx);\n } else {\n for(int j = 0; j != l; ++j) pout[j] = fsum_wg_impl(px[j], ng, pg, pw, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for 
num_threads(nthreads)", "context_chars": 100, "text": "/ if(ng == 0) {\n// if(cmth) { // If high-dimensional: column-level parallelism\n// // for(int j = 0; j < col; ++j) fsum_double_impl(pout + j, px + j*l, narm, l);\n// } else {\n// for(int j = 0; j != col; ++j) fsum_double_omp_impl(pout + j, px + j*l, narm, l, nthreads);\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n// } else {\n// if(cmth) { // If high-dimensional: column-level parallelism\n// // for(int j = 0; j < col; ++j) fsum_double_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n// } else {\n// for(int j = 0; j != col; ++j) fsum_double_g_omp_impl(pout + j*ng, px + j*l, ng, pg, narm, l, nthreads);\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " ng, pg, narm, l);\n// } else if(cmth) { // If high-dimensional: column-level parallelism\n// // for(int j = 0; j < col; ++j) fsum_int_g_impl(pout + j*ng, px + j*l, ng, pg, narm, l);\n// } else {\n// for(int j = 0; j != col; ++j) fsum_int_g_omp_impl(pout + j*ng, px + j*l, ng, pg, narm, l, nthreads);\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "mj;\n// }\n// } else if(cmth) { // If high-dimensional: column-level parallelism\n// // for(int j = 0; j < col; ++j) {\n// double sumj = fsum_int_impl(px + j*l, narm, l);\n// if(sumj > INT_MAX || sumj <= INT_MIN) anyoutl = 1;\n// pout[j] = sumj;\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ol; ++j) fsum_weights_impl(pout + j*ng, px + j*l, ng, pg, pw, narm, l);\n// } else if(cmth) {\n// // for(int j = 0; j < col; ++j) fsum_weights_impl(pout + j*ng, px + j*l, ng, pg, pw, narm, l);\n// } else {\n// for(int j = 0; j != col; ++j) fsum_weights_omp_impl(pout + j*ng, px + j*l, ng, pg, pw, narm, l, nthreads);\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "uble *pout = REAL(out);\n// if(cmth) { // If high-dimensional: column-level parallelism\n// // for(int j = 0; j < l; ++j) pout[j] = asReal(fsumC(px[j], Rng, g, w, Rnarm, Rnthreads1));\n// } else {\n// for(int j = 0; j != l; ++j) pout[j] = asReal(fsumC(px[j], Rng, g, w, Rnarm, Rnthreads));\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fsum.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "t = PROTECT(allocVector(VECSXP, l)), *pout = SEXPPTR(out), *px = SEXPPTR(x);\n// if(cmth) {\n// // for(int j = 0; j < l; ++j) pout[j] = fsumC(px[j], Rng, g, w, Rnarm, Rnthreads1);\n// } else {\n// for(int j = 0; j != 
l; ++j) pout[j] = fsumC(px[j], Rng, g, w, Rnarm, Rnthreads);\n// } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,n)", "context_chars": 100, "text": "nst int narm, const int l, const int nthreads) {\n double mean = 0;\n if(narm) {\n int n = 0;\n for(int i = 0; i < l; ++i) {\n int tmp = NISNAN(px[i]);\n mean += tmp ? px[i] : 0.0;\n n += tmp ? 1 : 0;\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean)", "context_chars": 100, "text": "n += tmp ? px[i] : 0.0;\n n += tmp ? 1 : 0;\n }\n return n == 0 ? NA_REAL : mean / n;\n }\n for(int i = 0; i < l; ++i) mean += px[i];\n return mean / l;\n}\n\nvoid fmean_double_g_impl(double *restrict pout, const double *restrict px, const int ng, const int *restrict pg, const int *restrict pgs, const int narm, const int l) {\n memset(pout, 0, sizeof(double) * ng);\n if(narm) {\n int *restrict n = (int*)Calloc(ng, int);\n for(int i = 0, gi; i != l; ++i) {\n if(ISNAN(px[i])) continue;\n gi = pg[i]-1;\n pout[gi] += px[i];\n ++n[gi];\n }\n for(int i = ng; i--; ) {\n if(n[i] == 0) pout[i] = NA_REAL;\n else pout[i] /= n[i];\n }\n Free(n);\n } else {\n --pout;\n for(int i = l; i--; ) pout[pg[i]] += px[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n ++pout;\n for(int i = ng; i--; ) pout[i] /= pgs[i];\n }\n} #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,sumw)", "context_chars": 100, "text": "w, const int narm, const int l, const int nthreads) {\n double mean = 0, sumw = 0;\n if(narm) {\n for(int i = 0; i < l; ++i) {\n int tmp = NISNAN(px[i]) + NISNAN(pw[i]) == 2; // && doesn't vectorize for some reason\n mean += tmp ? px[i] * pw[i] : 0.0;\n sumw += tmp ? pw[i] : 0.0;\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,sumw)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,sumw)", "context_chars": 100, "text": " sumw += tmp ? pw[i] : 0.0;\n }\n if(mean == 0 && sumw == 0) sumw = NA_REAL;\n } else {\n for(int i = 0; i < l; ++i) {\n mean += px[i] * pw[i];\n sumw += pw[i];\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,sumw)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,n)", "context_chars": 100, "text": " int l, const int nthreads) {\n long long mean = 0;\n double dmean;\n if(narm) {\n int n = 0;\n for(int i = 0; i < l; ++i) {\n int tmp = px[i] != NA_INTEGER;\n mean += tmp ? px[i] : 0;\n n += tmp ? 
1 : 0;\n } #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads) reduction(+:mean)", "context_chars": 100, "text": "ouble)mean / n;\n } else {\n if(px[0] == NA_INTEGER || px[l-1] == NA_INTEGER) return NA_REAL;\n for(int i = 0; i < l; ++i) mean += px[i];\n dmean = (double)mean / l;\n }\n return dmean;\n}\n\nvoid fmean_int_g_impl(double *restrict pout, const int *restrict px, const int ng, const int *restrict pg, const int *restrict pgs, const int narm, const int l) {\n memset(pout, 0, sizeof(double) * ng);\n if(narm) {\n int *restrict n = (int*)Calloc(ng, int);\n for(int i = 0, gi; i != l; ++i) {\n if(px[i] == NA_INTEGER) continue;\n gi = pg[i]-1;\n pout[gi] += px[i];\n ++n[gi];\n }\n for(int i = ng; i--; ) {\n if(n[i] == 0) pout[i] = NA_REAL;\n else pout[i] /= n[i];\n }\n Free(n);\n } else {\n --pout;\n for(int i = l; i--; ) pout[pg[i]] += px[i]; // Used to stop loop when all groups passed with NA, but probably no speed gain since groups are mostly ordered.\n ++pout;\n for(int i = ng; i--; ) pout[i] /= pgs[i];\n }\n} #pragma omp parallel for simd num_threads(nthreads) reduction(+:mean)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " pout[j] = fmean_double_impl(px + j*l, narm, l);\n } else if(col >= nthreads) {\n for(int j = 0; j < col; ++j) pout[j] = fmean_double_impl(px + j*l, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fmean_double_omp_impl(px + j*l, narm, l, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "g, pg, pgs, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fmean_double_g_impl(pout + j*ng, px + j*l, ng, pg, pgs, narm, l);\n }\n }\n break;\n }\n case INTSXP: {\n const int *px = INTEGER(x);\n if(ng > 0) {\n if(nthreads <= 1 || col == 1) {\n for(int j = 0; j != col; ++j) fmean_int_g_impl(pout + j*ng, px + j*l, ng, pg, pgs, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) fmean_int_g_impl(pout + j*ng, px + j*l, ng, pg, pgs, narm, l);\n }\n } else {\n if(nthreads <= 1) {\n for(int j = 0; j != col; ++j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else if(col >= nthreads) {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fmean_int_omp_impl(px + j*l, narm, l, nthreads);\n }\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "g, pg, pgs, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fmean_int_g_impl(pout + j*ng, px + j*l, ng, pg, pgs, narm, l);\n }\n } else {\n if(nthreads <= 1) {\n for(int j = 0; j != col; ++j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else if(col >= 
nthreads) {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fmean_int_omp_impl(px + j*l, narm, l, nthreads);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "+j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else if(col >= nthreads) {\n for(int j = 0; j < col; ++j) pout[j] = fmean_int_impl(px + j*l, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fmean_int_omp_impl(px + j*l, narm, l, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "+j) pout[j] = fmean_weights_impl(px + j*l, pw, narm, l);\n } else if(col >= nthreads) {\n for(int j = 0; j < col; ++j) pout[j] = fmean_weights_impl(px + j*l, pw, narm, l);\n } else {\n for(int j = 0; j != col; ++j) pout[j] = fmean_weights_omp_impl(px + j*l, pw, narm, l, nthreads);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": ", px + j*l, ng, pg, pw, narm, l);\n } else {\n if(nthreads > col) nthreads = col;\n for(int j = 0; j < col; ++j) fmean_weights_g_impl(pout + j*ng, px + j*l, ng, pg, pw, narm, l);\n }\n }\n }\n matCopyAttr(out, x, Rdrop, ng);\n UNPROTECT(nprotect);\n return out;\n}\n\n\n// For safe multithreading across data frame columns\n\ndouble fmean_impl_dbl(SEXP x, int narm, int nthreads) {\n int l = length(x);\n if(l < 1) return NA_REAL;\n if(nthreads <= 1) switch(TYPEOF(x)) {\n case REALSXP: return fmean_double_impl(REAL(x), narm, l);\n case LGLSXP:\n case INTSXP: return fmean_int_impl(INTEGER(x), narm, l);\n default: error(\"Unsupported SEXP type: '%s'\", type2char(TYPEOF(x)));\n }\n switch(TYPEOF(x)) {\n case REALSXP: return fmean_double_omp_impl(REAL(x), narm, l, nthreads);\n case LGLSXP:\n case INTSXP: return fmean_int_omp_impl(INTEGER(x), narm, l, nthreads);\n default: error(\"Unsupported SEXP type: '%s'\", type2char(TYPEOF(x)));\n }\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "!= R_NilValue && !(isObject(xj) && inherits(xj, \"ts\"))) copyMostAttrib(xj, outj);\n }\n for(int j = 0; j < l; ++j) fmean_g_omp_impl(px[j], DATAPTR(pout[j]), ng, pg, pgs, narm);\n } else {\n for(int j = 0; j != l; ++j) pout[j] = fmean_g_impl(px[j], ng, pg, pgs, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmean.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "up = 1;}\n SET_VECTOR_ELT(x, j, coerceVector(xj, REALSXP));\n }\n }\n for(int j = 0; j < l; ++j) fmean_weights_g_impl(REAL(pout[j]), REAL(px[j]), ng, pg, pw, narm, nrx);\n } else {\n for(int j = 0; j != l; ++j) pout[j] = fmean_wg_impl(px[j], ng, pg, pw, narm);\n } 
#pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/data.table_subset.c", "omp_pragma_line": "#pragma omp parallel for num_threads(getDTthreads())", "context_chars": 100, "text": " int *idxp = INTEGER(idx);\n\n bool stop = false;\n // #pragma omp simd reduction(|:stop)\n for (int i = 0; i < n; ++i) {\n int elem = idxp[i];\n stop |= (elem<1 && elem!=NA_INTEGER) || elem>max;\n } #pragma omp parallel for num_threads(getDTthreads())"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/programming.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "// Full distance matrix\n if(nthreads > 1) {\n if(nthreads > nrow-1) nthreads = nrow-1;\n for(int k = 1; k < nrow; ++k) { // Row vectors to compute distances with\n int nmk = nrow - k;\n double *presk = pres + l - nmk*(nmk+1)/2, // https://en.wikipedia.org/wiki/1_%2B_2_%2B_3_%2B_4_%2B_%E2%8B%AF\n *pxj = px + k, v;\n for(int j = 0; j != ncol; ++j) { // Elements of the row vector at hand\n v = pxj[-1];\n #pragma omp simd\n for(int i = 0; i < nmk; ++i) { // All remaining rows to compute the distance to\n double tmp = pxj[i] - v;\n presk[i] += tmp * tmp;\n }\n pxj += nrow;\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/programming.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads)", "context_chars": 100, "text": " for (int j = 0; j < ncol; ++j) {\n double *pxj = px + j * nrow, v = pv[j];\n for (int i = 0; i < nrow; ++i) {\n double tmp = pxj[i] - v;\n pres[i] += tmp * tmp;\n } #pragma omp parallel for simd num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/programming.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads) reduction(+:dres)", "context_chars": 100, "text": " double dres = 0.0;\n if(nthreads > 1) {\n if(nthreads > ncol) nthreads = ncol;\n for (int i = 0; i < ncol; ++i) {\n double tmp = px[i] - pv[i];\n dres += tmp * tmp;\n } #pragma omp parallel for num_threads(nthreads) reduction(+:dres)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/programming.c", "omp_pragma_line": "#pragma omp parallel for simd num_threads(nthreads)", "context_chars": 100, "text": "he square root loop below\n }\n }\n\n // Square Root\n if(ret == 1) {\n if(nthreads > 1) {\n for (size_t i = 0; i < l; ++i) pres[i] = sqrt(pres[i]);\n } else {\n #pragma omp simd\n for (size_t i = 0; i < l; ++i) pres[i] = sqrt(pres[i]);\n } #pragma omp parallel for simd num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "readed...\n switch(TYPEOF(x)) {\n case REALSXP: {\n const double *px = REAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_double(px + pst[gr]-1, po, pgs[gr], 1, narm);\n break;\n }\n case INTSXP: {\n const int *px = INTEGER(x);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
0 : ndistinct_fct(px + pst[gr]-1, po, pgs[gr], M, 1, narm);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ER(x);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_fct(px + pst[gr]-1, po, pgs[gr], M, 1, narm);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px + pst[gr]-1, po, pgs[gr], 1, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "s[gr] == 0 ? 0 : ndistinct_fct(px + pst[gr]-1, po, pgs[gr], M, 1, narm);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n }\n case LGLSXP: {\n const int *px = LOGICAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(px + pst[gr]-1, po, pgs[gr], 1, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": ";\n }\n break;\n }\n case LGLSXP: {\n const int *px = LOGICAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(px + pst[gr]-1, po, pgs[gr], 1, narm);\n break;\n }\n case STRSXP: {\n const SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_string(px + pst[gr]-1, po, pgs[gr], 1, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " 1, narm);\n break;\n }\n case STRSXP: {\n const SEXP *px = SEXPPTR(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_string(px + pst[gr]-1, po, pgs[gr], 1, narm);\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n } else { // Not sorted. Perhaps reordering x is faster??\n switch(TYPEOF(x)) {\n case REALSXP: {\n const double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_double(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n case INTSXP: {\n const int *px = INTEGER(x);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_fct(px, po + pst[gr]-1, pgs[gr], M, 0, narm);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
0 : ndistinct_int(px, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n case LGLSXP: {\n const int *px = LOGICAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n case STRSXP: {\n const SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_string(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " faster??\n switch(TYPEOF(x)) {\n case REALSXP: {\n const double *px = REAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_double(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n case INTSXP: {\n const int *px = INTEGER(x);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_fct(px, po + pst[gr]-1, pgs[gr], M, 0, narm);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ER(x);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_fct(px, po + pst[gr]-1, pgs[gr], M, 0, narm);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px, po + pst[gr]-1, pgs[gr], 0, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "s[gr] == 0 ? 0 : ndistinct_fct(px, po + pst[gr]-1, pgs[gr], M, 0, narm);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_int(px, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n case LGLSXP: {\n const int *px = LOGICAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": ";\n }\n break;\n }\n case LGLSXP: {\n const int *px = LOGICAL(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n case STRSXP: {\n const SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
0 : ndistinct_string(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " 0, narm);\n break;\n }\n case STRSXP: {\n const SEXP *px = SEXPPTR(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 0 : ndistinct_string(px, po + pst[gr]-1, pgs[gr], 0, narm);\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n }\n\n UNPROTECT(1);\n return res;\n}\n\n// Functions for Export --------------------------------------------------------\n\nSEXP fndistinctC(SEXP x, SEXP g, SEXP Rnarm, SEXP Rnthreads) {\n if(isNull(g)) return ndistinct_impl(x, asLogical(Rnarm));\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n SEXP res;\n int sorted = LOGICAL(pg[5])[1] == 1, ng = INTEGER(pg[0])[0], *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst,\n l = length(x), nthreads = asInteger(Rnthreads);\n if(l != length(pg[1])) error(\"length(g) must match length(x)\");\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(l, sizeof(int)); --po;\n for(int i = 0; i != l; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n if(nthreads > max_threads) nthreads = max_threads;\n PROTECT(res = ndistinct_g_impl(x, ng, pgs, po, pst, sorted, asLogical(Rnarm), nthreads));\n if(OBJECT(x) == 0) copyMostAttrib(x, res);\n else {\n SEXP sym_label = install(\"label\");\n setAttrib(res, sym_label, getAttrib(x, sym_label));\n }\n UNPROTECT(1);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " pout[j] = ndistinct_impl_int(px[j], narm);\n } else {\n if(nthreads > l) nthreads = l;\n for(int j = 0; j < l; ++j) pout[j] = ndistinct_impl_int(px[j], narm);\n }\n setAttrib(out, R_NamesSymbol, getAttrib(x, R_NamesSymbol));\n UNPROTECT(1);\n return out;\n } else {\n SEXP out = PROTECT(allocVector(VECSXP, l)), sym_label = PROTECT(install(\"label\")), *restrict pout = SEXPPTR(out);\n const SEXP *restrict px = SEXPPTR_RO(x);\n if(isNull(g)) {\n if(nthreads <= 1) {\n for(int j = 0; j != l; ++j) pout[j] = ndistinct_impl(px[j], narm);\n } else {\n if(nthreads > l) nthreads = l;\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < l; ++j) pout[j] = ndistinct_impl(px[j], narm);\n }\n // Not thread safe and thus taken out\n for(int j = 0; j != l; ++j) {\n SEXP xj = px[j];\n if(OBJECT(xj) == 0) copyMostAttrib(xj, pout[j]);\n else setAttrib(pout[j], sym_label, getAttrib(xj, sym_label));\n }\n DFcopyAttr(out, x, /*ng=*/0);\n } else {\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n int sorted = LOGICAL(pg[5])[1] == 1, ng = INTEGER(pg[0])[0], *restrict pgs = 
INTEGER(pg[2]), *restrict po, *restrict pst, gl = length(pg[1]);\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(gl, sizeof(int)); --po;\n for(int i = 0; i != gl; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n for(int j = 0; j != l; ++j) {\n SEXP xj = px[j];\n if(length(xj) != gl) error(\"length(g) must match nrow(x)\");\n pout[j] = ndistinct_g_impl(xj, ng, pgs, po, pst, sorted, narm, nthreads);\n if(OBJECT(xj) == 0) copyMostAttrib(xj, pout[j]);\n else setAttrib(pout[j], sym_label, getAttrib(xj, sym_label));\n }\n DFcopyAttr(out, x, ng);\n }\n UNPROTECT(2);\n return out;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "out[j] = ndistinct_impl(px[j], narm);\n } else {\n if(nthreads > l) nthreads = l;\n for(int j = 0; j < l; ++j) pout[j] = ndistinct_impl(px[j], narm);\n }\n // Not thread safe and thus taken out\n for(int j = 0; j != l; ++j) {\n SEXP xj = px[j];\n if(OBJECT(xj) == 0) copyMostAttrib(xj, pout[j]);\n else setAttrib(pout[j], sym_label, getAttrib(xj, sym_label));\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " col) nthreads = col;\n\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x);\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_double(px + j*l, &l, l, 1, narm);\n break;\n }\n case INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_int(px + j*l, &l, l, 1, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " case INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_int(px + j*l, &l, l, 1, narm);\n break;\n }\n case LGLSXP: {\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_logi(px + j*l, &l, l, 1, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " &l, l, 1, narm);\n break;\n }\n case LGLSXP: {\n int *px = INTEGER(x);\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_logi(px + j*l, &l, l, 1, narm);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_string(px + j*l, &l, l, 1, narm);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "&l, l, 1, narm);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n for(int j = 0; j < col; ++j)\n pres[j] = ndistinct_string(px + j*l, &l, l, 1, narm);\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n matCopyAttr(res, x, Rdrop, /*ng=*/0);\n UNPROTECT(1);\n return res;\n } else { // With groups\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n int sorted = LOGICAL(pg[5])[1] == 1, ng = INTEGER(pg[0])[0], *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst, gl = length(pg[1]);\n if(l != gl) error(\"length(g) must match nrow(x)\");\n\n SEXP res = PROTECT(allocVector(INTSXP, col * ng));\n int *restrict pres = INTEGER(res);\n if(nthreads > col) nthreads = col; // column-level sufficient? or do sub-column level??\n\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(l, sizeof(int)); --po;\n for(int i = 0; i != l; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n\n if(sorted) { // Sorted\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_double(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n }\n case INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_int(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_string(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n }\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n } else { // Not sorted. Perhaps reordering x is faster??\n // Todo: perhaps going first by groups, then by columns is better? saves zero group size checks...\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 
0 : ndistinct_double(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n case INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_int(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_string(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n }\n break;\n }\n default: error(\"Not Supported SEXP Type!\");\n }\n }\n matCopyAttr(res, x, Rdrop, ng);\n UNPROTECT(1);\n return res;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " Sorted\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_double(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ase INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_int(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x);\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 
0 : ndistinct_string(pxj + pst[gr]-1, po, pgs[gr], 1, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ecks...\n switch(TYPEOF(x)) {\n case REALSXP: {\n double *px = REAL(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_double(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ase INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_int(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_logi(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fndistinct.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " }\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x);\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr)\n pres[jng + gr] = pgs[gr] == 0 ? 0 : ndistinct_string(pxj, po + pst[gr]-1, pgs[gr], 0, narm);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "&l;\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : mode_double(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_fct_logi(px + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_INTEGER : mode_int(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_fct_logi(px + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "A_INTEGER : mode_fct_logi(px + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(px + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(px + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : mode_string(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : mode_string(px + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n } else { // Not sorted. Perhaps reordering x is faster??\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : mode_double(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_INTEGER : mode_int(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : mode_string(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "r??\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : mode_double(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "A_INTEGER : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_LOGICAL : mode_fct_logi(px, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : mode_string(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : mode_string(px, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n }\n\n copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\nSEXP w_mode_g_impl(SEXP x, double *pw, int ng, int *pgs, int *po, int *pst, int sorted, int narm, int ret, int nthreads) {\n\n int l = length(x), tx = TYPEOF(x);\n if(nthreads > ng) nthreads = ng;\n\n SEXP res = PROTECT(allocVector(tx, ng));\n\n if(sorted) { // Sorted: could compute cumulative group size (= starts) on the fly... but doesn't work multithreaded...\n po = &l;\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : w_mode_double(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n } else { // Not sorted. Perhaps reordering x is faster??\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : w_mode_double(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_INTEGER : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n }\n\n copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "&l;\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : w_mode_double(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], M, 1, narm, ret);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_LOGICAL : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n } else { // Not sorted. Perhaps reordering x is faster??\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_REAL : w_mode_double(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "r??\n switch(tx) {\n case REALSXP: {\n double *px = REAL(x), *pres = REAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_REAL : w_mode_double(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n case INTSXP: {\n int *px = INTEGER(x), *pres = INTEGER(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "(res);\n if(isFactor(x) && nlevels(x) < l / ng * 3) {\n int M = nlevels(x);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "GER : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], M, 0, narm, ret);\n } else {\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *pres = LOGICAL(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(px, pw, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n #pragma omp parallel for num_threads(nthreads)\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *pres = SEXPPTR(res);\n for(int gr = 0; gr < ng; ++gr)\n pres[gr] = pgs[gr] == 0 ? 
NA_STRING : w_mode_string(px, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n }\n\n copyMostAttrib(x, res);\n UNPROTECT(1);\n return res;\n}\n\n\n// Functions for Export --------------------------------------------------------\n\nSEXP fmodeC(SEXP x, SEXP g, SEXP w, SEXP Rnarm, SEXP Rret, SEXP Rnthreads) {\n int nullg = isNull(g), nullw = isNull(w), l = length(x), nprotect = 0;\n if(l <= 1) return x;\n if(nullg && nullw) return mode_impl(x, asLogical(Rnarm), asInteger(Rret));\n double tmp = 0.0, *restrict pw = &tmp;\n if(!nullw) {\n if(length(w) != l) error(\"length(w) must match length(x)\");\n if(TYPEOF(w) != REALSXP) {\n if(!(TYPEOF(w) == INTSXP || TYPEOF(w) == LGLSXP)) error(\"weights need to be double or integer/logical (internally coerced to double)\");\n w = PROTECT(coerceVector(w, REALSXP)); ++nprotect;\n }\n pw = REAL(w);\n }\n if(nullg) {\n // if(TYPEOF(w) != REALSXP)\n UNPROTECT(nprotect);\n return w_mode_impl(x, pw, asLogical(Rnarm), asInteger(Rret));\n }\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n int sorted = LOGICAL(pg[5])[1] == 1, ng = INTEGER(pg[0])[0], *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst, nthreads = asInteger(Rnthreads);\n if(l != length(pg[1])) error(\"length(g) must match length(x)\");\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(l, sizeof(int)); --po;\n for(int i = 0; i != l; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n // if(nullw) return mode_g_impl(x, ng, pgs, po, pst, sorted, asLogical(Rnarm), asInteger(Rret), asInteger(Rnthreads));\n // if(TYPEOF(w) != REALSXP) UNPROTECT(nprotect);\n // return w_mode_g_impl(x, pw, ng, pgs, po, pst, sorted, asLogical(Rnarm), asInteger(Rret), asInteger(Rnthreads));\n // Thomas Kalibera Patch:\n if(nthreads > max_threads) nthreads = max_threads;\n SEXP res;\n if(nullw) res = mode_g_impl(x, ng, pgs, po, pst, sorted, asLogical(Rnarm), asInteger(Rret), nthreads);\n else res = w_mode_g_impl(x, pw, ng, pgs, po, pst, sorted, asLogical(Rnarm), asInteger(Rret), nthreads);\n UNPROTECT(nprotect);\n return res;\n} #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "<= 1) {\n for(int j = 0; j != l; ++j) pout[j] = mode_impl(px[j], narm, ret);\n } else {\n for(int j = 0; j < l; ++j) pout[j] = mode_impl_plain(px[j], narm, ret);\n for(int j = 0; j != l; ++j) copyMostAttrib(px[j], pout[j]); // Not thread safe and thus taken out...\n }\n } else {\n int nrx = length(px[0]);\n double tmp = 0.0, *restrict pw = &tmp;\n if(!nullw) {\n if(length(w) != nrx) error(\"length(w) must match nrow(x)\");\n if(TYPEOF(w) != REALSXP) {\n if(!(TYPEOF(w) == INTSXP || TYPEOF(w) == LGLSXP)) error(\"weights need to be double or integer/logical (internally coerced to double)\");\n w = PROTECT(coerceVector(w, REALSXP)); ++nprotect;\n }\n pw = REAL(w);\n }\n if(nullg) {\n if(nthreads <= 1) {\n 
for(int j = 0; j != l; ++j) pout[j] = w_mode_impl(px[j], pw, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < l; ++j) pout[j] = w_mode_impl_plain(px[j], pw, narm, ret);\n for(int j = 0; j != l; ++j) copyMostAttrib(px[j], pout[j]); // Not thread safe and thus taken out...\n }\n } else {\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n ng = INTEGER(pg[0])[0];\n int sorted = LOGICAL(pg[5])[1] == 1, *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst;\n if(nrx != length(pg[1])) error(\"length(g) must match nrow(x)\");\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(nrx, sizeof(int)); --po;\n for(int i = 0; i != nrx; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n if(nullw) { // Parallelism at sub-column level\n for(int j = 0; j < l; ++j) pout[j] = mode_g_impl(px[j], ng, pgs, po, pst, sorted, narm, ret, nthreads);\n } else { // Parallelism at sub-column level\n for(int j = 0; j < l; ++j) pout[j] = w_mode_g_impl(px[j], pw, ng, pgs, po, pst, sorted, narm, ret, nthreads);\n }\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " for(int j = 0; j != l; ++j) pout[j] = w_mode_impl(px[j], pw, narm, ret);\n } else {\n for(int j = 0; j < l; ++j) pout[j] = w_mode_impl_plain(px[j], pw, narm, ret);\n for(int j = 0; j != l; ++j) copyMostAttrib(px[j], pout[j]); // Not thread safe and thus taken out...\n }\n } else {\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n ng = INTEGER(pg[0])[0];\n int sorted = LOGICAL(pg[5])[1] == 1, *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst;\n if(nrx != length(pg[1])) error(\"length(g) must match nrow(x)\");\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(nrx, sizeof(int)); --po;\n for(int i = 0; i != nrx; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } else {\n po = INTEGER(o);\n pst = INTEGER(getAttrib(o, install(\"starts\")));\n }\n if(nullw) { // Parallelism at sub-column level\n for(int j = 0; j < l; ++j) pout[j] = mode_g_impl(px[j], ng, pgs, po, pst, sorted, narm, ret, nthreads);\n } else { // Parallelism at sub-column level\n for(int j = 0; j < l; ++j) pout[j] = w_mode_g_impl(px[j], pw, ng, pgs, po, pst, sorted, narm, ret, nthreads);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " REALSXP: {\n double *px = REAL(x), *restrict pres = 
REAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) pres[j] = mode_double(px + j*l, &l, l, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_double(px + j*l, pw, &l, l, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "= 0; j < col; ++j) pres[j] = mode_double(px + j*l, &l, l, 1, narm, ret);\n } else {\n for(int j = 0; j < col; ++j) pres[j] = w_mode_double(px + j*l, pw, &l, l, 1, narm, ret);\n }\n break;\n }\n case INTSXP: { // Factor matrix not well defined object...\n int *px = INTEGER(x), *restrict pres = INTEGER(res);\n if(nullw) {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = mode_int(px + j*l, &l, l, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_int(px + j*l, pw, &l, l, 1, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "bject...\n int *px = INTEGER(x), *restrict pres = INTEGER(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) pres[j] = mode_int(px + j*l, &l, l, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_int(px + j*l, pw, &l, l, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " j = 0; j < col; ++j) pres[j] = mode_int(px + j*l, &l, l, 1, narm, ret);\n } else {\n for(int j = 0; j < col; ++j) pres[j] = w_mode_int(px + j*l, pw, &l, l, 1, narm, ret);\n }\n break;\n }\n case LGLSXP: {\n int *px = LOGICAL(x), *restrict pres = LOGICAL(res);\n if(nullw) {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = mode_fct_logi(px + j*l, &l, l, 1, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_fct_logi(px + j*l, pw, &l, l, 1, 1, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "GLSXP: {\n int *px = LOGICAL(x), *restrict pres = LOGICAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) pres[j] = mode_fct_logi(px + j*l, &l, l, 1, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_fct_logi(px + j*l, pw, &l, l, 1, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "j < col; ++j) pres[j] = mode_fct_logi(px + j*l, &l, l, 1, 1, narm, ret);\n } else {\n for(int j = 0; j < col; ++j) pres[j] = w_mode_fct_logi(px + j*l, pw, &l, l, 1, 1, 
narm, ret);\n }\n break;\n }\n case STRSXP: {\n SEXP *px = SEXPPTR(x), *restrict pres = SEXPPTR(res);\n if(nullw) {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = mode_string(px + j*l, &l, l, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_string(px + j*l, pw, &l, l, 1, narm, ret);\n }\n break;\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "RSXP: {\n SEXP *px = SEXPPTR(x), *restrict pres = SEXPPTR(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) pres[j] = mode_string(px + j*l, &l, l, 1, narm, ret);\n } else {\n #pragma omp parallel for num_threads(nthreads)\n for(int j = 0; j < col; ++j) pres[j] = w_mode_string(px + j*l, pw, &l, l, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "= 0; j < col; ++j) pres[j] = mode_string(px + j*l, &l, l, 1, narm, ret);\n } else {\n for(int j = 0; j < col; ++j) pres[j] = w_mode_string(px + j*l, pw, &l, l, 1, narm, ret);\n }\n break;\n }\n default: error(\"Not Supported SEXP Type: '%s'\", type2char(tx));\n }\n\n matCopyAttr(res, x, Rdrop, /*ng=*/0);\n UNPROTECT(nprotect);\n return res;\n }\n\n // With groups\n if(TYPEOF(g) != VECSXP || !inherits(g, \"GRP\")) error(\"g needs to be an object of class 'GRP', see ?GRP\");\n const SEXP *restrict pg = SEXPPTR_RO(g), o = pg[6];\n int sorted = LOGICAL(pg[5])[1] == 1, ng = INTEGER(pg[0])[0], *restrict pgs = INTEGER(pg[2]), *restrict po, *restrict pst, gl = length(pg[1]);\n if(l != gl) error(\"length(g) must match nrow(x)\");\n SEXP res = PROTECT(allocVector(tx, ng * col));\n\n if(isNull(o)) {\n int *cgs = (int *) R_alloc(ng+2, sizeof(int)), *restrict pgv = INTEGER(pg[1]); cgs[1] = 1;\n for(int i = 0; i != ng; ++i) cgs[i+2] = cgs[i+1] + pgs[i];\n pst = cgs + 1;\n if(sorted) po = &l;\n else {\n int *restrict count = (int *) Calloc(ng+1, int);\n po = (int *) R_alloc(l, sizeof(int)); --po;\n for(int i = 0; i != l; ++i) po[cgs[pgv[i]] + count[pgv[i]]++] = i+1;\n ++po; Free(count);\n }\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " REALSXP: {\n double *px = REAL(x), *restrict pres = REAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_REAL : mode_double(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "L : mode_double(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? 
NA_REAL : w_mode_double(pxj + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "bject...\n int *px = INTEGER(x), *restrict pres = INTEGER(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "EGER : mode_int(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(pxj + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "GLSXP: {\n int *px = LOGICAL(x), *restrict pres = LOGICAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(pxj + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ode_fct_logi(pxj + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int *pxj = px + j * l, jng = j * ng;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_LOGICAL : w_mode_fct_logi(pxj + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "RSXP: {\n SEXP *px = SEXPPTR(x), *restrict pres = SEXPPTR(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_STRING : mode_string(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "G : mode_string(pxj + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? 
NA_STRING : w_mode_string(pxj + pst[gr]-1, pw + pst[gr]-1, po, pgs[gr], 1, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": " REALSXP: {\n double *px = REAL(x), *restrict pres = REAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_REAL : mode_double(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "L : mode_double(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n double *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_REAL : w_mode_double(pxj, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "NTSXP: {\n int *px = INTEGER(x), *restrict pres = INTEGER(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_INTEGER : mode_int(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "EGER : mode_int(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_INTEGER : w_mode_int(pxj, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "GLSXP: {\n int *px = LOGICAL(x), *restrict pres = LOGICAL(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_LOGICAL : mode_fct_logi(pxj, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "ode_fct_logi(pxj, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng, *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? 
NA_LOGICAL : w_mode_fct_logi(pxj, pw, po + pst[gr]-1, pgs[gr], 1, 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "RSXP: {\n SEXP *px = SEXPPTR(x), *restrict pres = SEXPPTR(res);\n if(nullw) {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_STRING : mode_string(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/SebKrantz/collapse/src/fmode.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nthreads)", "context_chars": 100, "text": "G : mode_string(pxj, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n }\n } else {\n for(int j = 0; j < col; ++j) {\n int jng = j * ng;\n SEXP *pxj = px + j * l;\n for(int gr = 0; gr < ng; ++gr) pres[jng + gr] = pgs[gr] == 0 ? NA_STRING : w_mode_string(pxj, pw, po + pst[gr]-1, pgs[gr], 0, narm, ret);\n } #pragma omp parallel for num_threads(nthreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v5-openmp_parallel/src/lbm_comm.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": " return;\n }\n\n MPI_Status status;\n switch (comm_type) {\n case COMM_SEND:\n for (size_t x = 1; x < mesh_to_process->width - 2; x++) {\n for (size_t k = 0; k < DIRECTIONS; k++) {\n mesh->buffer[(x - 1) * DIRECTIONS + k] =\n Mesh_get_cell(mesh_to_process, x, y)[k];\n }\n } #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v5-openmp_parallel/src/lbm_comm.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": ">width - 2),\n MPI_DOUBLE, target_rank, 0, MPI_COMM_WORLD, &status);\n for (size_t x = 1; x < mesh_to_process->width - 2; x++) {\n for (size_t k = 0; k < DIRECTIONS; k++) {\n Mesh_get_cell(mesh_to_process, x, y)[k] =\n mesh->buffer[(x - 1) * DIRECTIONS + k];\n }\n } #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v5-openmp_parallel/src/lbm_phys.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) schedule(static)", "context_chars": 100, "text": "h_out->width);\n assert(mesh_in->height == mesh_out->height);\n\n // Loop on all inner cells\n for (size_t j = 1; j < mesh_in->height - 1; j++) {\n for (size_t i = 1; i < mesh_in->width - 1; i++) {\n compute_cell_collision(Mesh_get_cell(mesh_out, i, j), Mesh_get_cell(mesh_in, i, j));\n }\n } #pragma omp parallel for collapse(2) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v5-openmp_parallel/src/lbm_phys.c", "omp_pragma_line": "#pragma omp parallel for collapse(3) schedule(static)", "context_chars": 100, "text": " }\n }\n}\n\nvoid propagation(Mesh* mesh_out, Mesh const* mesh_in)\n{\n // Loop on all cells\n for (size_t j = 0; j < mesh_out->height; j++) {\n for (size_t i = 0; i < mesh_out->width; i++) {\n // For all direction\n for (size_t k = 0; k < DIRECTIONS; k++) {\n // Compute destination point\n ssize_t ii = (i + direction_matrix[k][0]);\n ssize_t jj = (j 
+ direction_matrix[k][1]);\n // Propagate to neighboor nodes\n if ((ii >= 0 && ii < mesh_out->width) &&\n (jj >= 0 && jj < mesh_out->height))\n {\n Mesh_get_cell(mesh_out, ii, jj)[k] =\n Mesh_get_cell(mesh_in, i, j)[k];\n }\n }\n }\n } #pragma omp parallel for collapse(3) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v6-fine_tuning/src/lbm_comm.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": "eturn;\n }\n\n MPI_Status status;\n switch (comm_type) {\n case COMM_SEND:\n //for (size_t x = 1; x < mesh_to_process->width - 2; x++) {\n for (size_t k = 0; k < DIRECTIONS; k++) {\n mesh->buffer[(x - 1) * DIRECTIONS + k] =\n Mesh_get_cell(mesh_to_process, x, y)[k];\n }\n } #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dssgabriel/lattice-boltzmann-method/v6-fine_tuning/src/lbm_comm.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": "idth - 2),\n MPI_DOUBLE, target_rank, 0, MPI_COMM_WORLD, &status);\n //for (size_t x = 1; x < mesh_to_process->width - 2; x++) {\n for (size_t k = 0; k < DIRECTIONS; k++) {\n Mesh_get_cell(mesh_to_process, x, y)[k] =\n mesh->buffer[(x - 1) * DIRECTIONS + k];\n }\n } #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arneish/parallel-PCA-openmp/lab2_omp.c", "omp_pragma_line": "#pragma omp parallel for num_threads(1) private(i, j) collapse(2) schedule(static) ", "context_chars": 100, "text": "l2_diff);\n return l2_diff;\n}\n\nvoid transpose(float *M, int m, int n, float *M_T)\n{\n int i, j;\nfor (i = 0; i < m; i++)\n {\n for (j = 0; j < n; j++)\n {\n M_T[j * m + i] = M[i * n + j];\n }\n } #pragma omp parallel for num_threads(1) private(i, j) collapse(2) schedule(static) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arneish/parallel-PCA-openmp/lab2_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k, sum, temp1, temp2) schedule(static)", "context_chars": 100, "text": "t *) malloc(sizeof(float)*n2*m2);\n transpose(M_2, m2, n2, M_2_T);\n int i, j, k, temp1, temp2;\nfor (i = 0; i < m1; i++)\n {\n temp1 = i*n1; \n for (j = 0; j < n2; j++)\n {\n sum = 0.0;\n temp2 = j*m2;\n for (k = 0; k < n1; k++)\n {\n sum += M_1[temp1 + k] * M_2_T[temp2 + k];\n }\n result[i * n2 + j] = sum;\n }\n } #pragma omp parallel for private(i, j, k, sum, temp1, temp2) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arneish/parallel-PCA-openmp/lab2_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) reduction(+: sum_sq)", "context_chars": 100, "text": "+ i] - A_current[i * P + i]);\n }\n if (sum_sq>TOLERANCE)\n return norm = sqrtf(sum_sq);\n\nfor (i=0; i #pragma omp parallel for private(i, j) reduction(+: sum_sq)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arneish/parallel-PCA-openmp/lab2_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j) collapse(2) schedule(static) ", "context_chars": 100, "text": " float *D_T = (float *)malloc(sizeof(float) * P * N);\n transpose(D, N, P, D_T);\n int i, j;\nfor (i = 0; i < N; i++)\n {\n for (j = 0; j < P; j++)\n {\n D_T[j * N + i] = D[i * P + j];\n }\n } #pragma omp parallel for private(i, j) collapse(2) schedule(static) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arneish/parallel-PCA-openmp/lab2_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k, sum, temp1, temp2) schedule(static)", "context_chars": 100, "text": "K: %d, retention_: %f\\n\", *K, retention_);\n *D_HAT = (float *)malloc(sizeof(float) * N * (*K));\n\nfor (i=0; i #pragma omp parallel for private(i, j, k, sum, temp1, temp2) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/kianenigma/pmms-heat-dissipation/assignment_2/vecsort/vecsort.c", "omp_pragma_line": "#pragma omp parallel for num_threads(DATA_THREADS)", "context_chars": 100, "text": "ow], length * sizeof(int));\n }\n\n /* start sorting one by one */\n gettimeofday(&tv1, NULL);\nfor (row = 0; row < rows; row++) {\n long length = row_lengths[row];\n split_seq(b[row], 0, length, vector[row]);\n } #pragma omp parallel for num_threads(DATA_THREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/kianenigma/pmms-heat-dissipation/assignment_2/heat_omp/compute.c", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": " tmp; }\n\n /* initialize halo on source */\n do_copy(h, w, src);\n\n /* compute */\nprivate(i, j)\\\n schedule(static)\\\n reduction(max: maxdiff)\\\n num_threads(p->nthreads)\n for (i = 1; i < h - 1; ++i) {\n for (j = 1; j < w - 1; ++j)\n {\n \n double w = (*c)[i][j];\n double restw = 1.0 - w;\n double v, v_old;\n v_old = (*src)[i][j];\n\n v = w * v_old +\n ((*src)[i+1][j ] + (*src)[i-1][j ] +\n (*src)[i ][j+1] + (*src)[i ][j-1]) * (restw * c_cdir) +\n ((*src)[i-1][j-1] + (*src)[i-1][j+1] +\n (*src)[i+1][j-1] + (*src)[i+1][j+1]) * (restw * c_cdiag);\n\n double diff = fabs(v - v_old);\n if (diff > maxdiff) maxdiff = diff;\n (*dst)[i][j] = v;\n\n }\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "umber of threads: \");\n scanf(\"%d\", &t);\n double r[n];\n double p[n];\n double px[n];\n\n for( int i = 0 ; i #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": " < n ; i++)\n {\n sum = r[i]*r[i] + sum;\n }\n\n double temp[n];\n for( int i = 0; i #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "nt i = 0; ifor(int i = 0 ; i < n ; i++)\n {\n #pragma omp parallel for reduction(+ : temp[i])\n for(int j = 0 ; j < n ; j++ )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n }\n } #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : temp[i])", "context_chars": 100, "text": " #pragma omp parallel for num_threads(t)\n for(int i = 0 ; i < n ; i++)\n {\n for(int j = 0 ; j < n ; j++ )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n } #pragma omp parallel for reduction(+ : 
temp[i])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t) reduction(+ : num)", "context_chars": 100, "text": " )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n }\n }\n for(int j = 0 ; j < n ; j++)\n {\n num = num + temp[j]*p[j];\n } #pragma omp parallel for num_threads(t) reduction(+ : num)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "j++)\n {\n num = num + temp[j]*p[j];\n }\n\n alpha = sum / num;\n\n for(int i = 0; i < n ; i++ )\n {\n px[i] = x[i];\n x[i] = x[i] + alpha*p[i];\n r[i] = r[i] - alpha*temp[i];\n } #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t) reduction(+ : beta)", "context_chars": 100, "text": " + alpha*p[i];\n r[i] = r[i] - alpha*temp[i];\n }\n\n double beta = 0;\n for(int i = 0 ; i < n ; i++)\n {\n beta = beta + r[i]*r[i];\n } #pragma omp parallel for num_threads(t) reduction(+ : beta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": " i++)\n {\n beta = beta + r[i]*r[i];\n }\n\n beta = beta / sum;\n\n for (int i = 0 ; i < n ; i++ )\n {\n p[i] = r[i] + beta*p[i];\n } #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/random.c", "omp_pragma_line": "#pragma omp parallel for num_threads(p) schedule(static, n) reduction(+:dxi)", "context_chars": 100, "text": ";\n\n for(k=0; k \\n\", k+1);\n for(int i=0; i #pragma omp parallel for num_threads(p) schedule(static, n) reduction(+:dxi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "mber of threads: \");\n scanf(\"%d\", &t);\n\n double r[n];\n double p[n];\n double px[n];\n\n for( int i = 0 ; i #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": " < n ; i++)\n {\n sum = r[i]*r[i] + sum;\n }\n\n double temp[n];\n for( int i = 0; i #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "nt i = 0; ifor(int i = 0 ; i < n ; i++)\n {\n #pragma omp parallel for reduction(+ : temp[i])\n for(int j = 0 ; j < n ; j++ )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n }\n } #pragma omp parallel for num_threads(t)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : temp[i])", "context_chars": 100, "text": " #pragma omp parallel for num_threads(t)\n for(int i = 0 ; i < n ; i++)\n {\n for(int j = 0 ; j < n ; j++ )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n } #pragma omp parallel for reduction(+ : temp[i])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t) reduction(+ : num)", "context_chars": 100, "text": " )\n {\n temp[i] = A[i*n+j]*p[j] + temp[i];\n }\n }\n for(int j = 0 ; j < n ; j++)\n {\n num = num + temp[j]*p[j];\n } #pragma omp parallel for num_threads(t) reduction(+ : num)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": "j++)\n {\n num = num + temp[j]*p[j];\n }\n\n alpha = sum / num;\n\n for(int i = 0; i < n ; i++ )\n {\n px[i] = x[i];\n x[i] = x[i] + alpha*p[i];\n r[i] = r[i] - alpha*temp[i];\n } #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t) reduction(+ : beta)", "context_chars": 100, "text": " + alpha*p[i];\n r[i] = r[i] - alpha*temp[i];\n }\n\n double beta = 0;\n for(int i = 0 ; i < n ; i++)\n {\n beta = beta + r[i]*r[i];\n } #pragma omp parallel for num_threads(t) reduction(+ : beta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(t)", "context_chars": 100, "text": " i++)\n {\n beta = beta + r[i]*r[i];\n }\n\n beta = beta / sum;\n\n for (int i = 0 ; i < n ; i++ )\n {\n p[i] = r[i] + beta*p[i];\n } #pragma omp parallel for num_threads(t)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/maitreyeepaliwal/Solving-System-of-linear-equations-in-parallel-and-serial/required.c", "omp_pragma_line": "#pragma omp parallel for num_threads(p) schedule(static, n) reduction(+:dxi)", "context_chars": 100, "text": ";\n\n for(k=0; k \\n\", k+1);\n for(int i=0; i #pragma omp parallel for num_threads(p) schedule(static, n) reduction(+:dxi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Sitaras/Parallel-Systems-Project/HybridMPI/jacobi_hybrid.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) schedule(static)", "context_chars": 100, "text": " double fx2,fy2;\n double temp_error = 0.0;\n double updateVal;\n double f;\n\n for (x = 2; x < (size-2); x++){\n for (y = 2; y < (size-2); y++){\n updateVal = ((SRC(x-1,y) + SRC(x+1,y))*cx + (SRC(x,y-1) + SRC(x,y+1))*cy + SRC(x,y)*cc - F(x,y))/cc;\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n }\n } #pragma omp parallel for collapse(2) shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) 
schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Sitaras/Parallel-Systems-Project/HybridMPI/jacobi_hybrid.c", "omp_pragma_line": "#pragma omp parallel for shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) schedule(static)", "context_chars": 100, "text": ")) - 2.0*(1.0-FY(y)*FY(y)))\n\n int x, y;\n double temp_error = 0.0;\n double updateVal;\n\n\n for(x=1;x<(size-1);x++){\n int y=1;\n updateVal = (\t(SRC(x-1,y) + SRC(x+1,y))*cx + (SRC(x,y-1) + SRC(x,y+1))*cy + SRC(x,y)*cc - F(x,y))/cc;\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n\n y=size-2;\n updateVal = (\t(SRC(x-1,y) + SRC(x+1,y))*cx + (SRC(x,y-1) + SRC(x,y+1))*cy + SRC(x,y)*cc - F(x,y))/cc;\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n } #pragma omp parallel for shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Sitaras/Parallel-Systems-Project/HybridMPI/jacobi_hybrid.c", "omp_pragma_line": "#pragma omp parallel for shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) schedule(static)", "context_chars": 100, "text": ";\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n }\n\n\n for(y=2;y<(size-2);y++){\n int x=1;\n updateVal = (\t(SRC(x-1,y) + SRC(x+1,y))*cx + (SRC(x,y-1) + SRC(x,y+1))*cy + SRC(x,y)*cc - F(x,y))/cc;\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n\n x=size-2;\n updateVal = (\t(SRC(x-1,y) + SRC(x+1,y))*cx + (SRC(x,y-1) + SRC(x,y+1))*cy + SRC(x,y)*cc - F(x,y))/cc;\n DST(x,y) = SRC(x,y) - omega*updateVal;\n temp_error += updateVal*updateVal;\n } #pragma omp parallel for shared(size,src,dst,xStart,yStart,deltaX,deltaY,alpha,omega,cx,cy,cc),private(y,x,updateVal),reduction(+:temp_error) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for shared(nRows,nPaddedCols, other, data)", "context_chars": 100, "text": "; iter < iterHighBound; iter++ )\n {\n /* apply the stencil operator */\n #pragma acc parallel loop collapse(2) independent present(data,other)\n for( unsigned int i = 1; i < (nRows-1); i++ )\n {\n // #pragma omp parallel for\n\t // #pragma acc loop independent\n\t for( unsigned int j = 1; j < (nPaddedCols-1); j++ )\n {\n float oldCenterValue = dval(data, nPaddedCols, i, j);\n float oldNSEWValues = dval(data, nPaddedCols, i - 1, j ) +\n dval(data, nPaddedCols, i + 1, j ) +\n dval(data, nPaddedCols, i, j - 1 ) +\n dval(data, nPaddedCols, i, j + 1 );\n float oldDiagonalValues = dval(data, nPaddedCols, i - 1, j - 1) +\n dval(data, nPaddedCols, i - 1, j + 1) +\n dval(data, nPaddedCols, i + 1, j - 1) +\n dval(data, nPaddedCols, i + 1, j + 1);\n\n float newVal = wCenter * oldCenterValue +\n wCardinal * oldNSEWValues +\n wDiagonal * oldDiagonalValues;\n dval(other, nPaddedCols, i, j ) = newVal;\n }\n } #pragma omp parallel for shared(nRows,nPaddedCols, other, data)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ta,other)\n for( unsigned int i = 1; i < (nRows-1); i++ )\n {\n // // #pragma acc loop independent\n\t for( unsigned int j = 1; j < (nPaddedCols-1); j++ )\n {\n float oldCenterValue = dval(data, nPaddedCols, i, j);\n float oldNSEWValues = dval(data, nPaddedCols, i - 1, j ) +\n dval(data, nPaddedCols, i + 1, j ) +\n dval(data, nPaddedCols, i, j - 1 ) +\n dval(data, nPaddedCols, i, j + 1 );\n float oldDiagonalValues = dval(data, nPaddedCols, i - 1, j - 1) +\n dval(data, nPaddedCols, i - 1, j + 1) +\n dval(data, nPaddedCols, i + 1, j - 1) +\n dval(data, nPaddedCols, i + 1, j + 1);\n\n float newVal = wCenter * oldCenterValue +\n wCardinal * oldNSEWValues +\n wDiagonal * oldDiagonalValues;\n dval(other, nPaddedCols, i, j ) = newVal;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for shared(nRows,nPaddedCols,other,data)", "context_chars": 100, "text": "this successfully\n * within OpenACC parallel region.\n */\n\t // #pragma acc parallel loop collapse(2) independent present(data,other)\n for( unsigned int i = 1; i < (nRows - 1); i++ )\n {\n\t //#pragma omp parallel for\n\t //#pragma acc loop independent\n for( unsigned int j = 1; j < (nCols - 1); j++ )\n {\n dval(data, nPaddedCols, i, j) = dval(other, nPaddedCols, i, j);\n }\n } #pragma omp parallel for shared(nRows,nPaddedCols,other,data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "sent(data,other)\n for( unsigned int i = 1; i < (nRows - 1); i++ )\n {\n\t ////#pragma acc loop independent\n for( unsigned int j = 1; j < (nCols - 1); j++ )\n {\n dval(data, nPaddedCols, i, j) = dval(other, nPaddedCols, i, j);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for shared(nRows, nPaddedCols, other, data)", "context_chars": 100, "text": "iter < iterHighBound; iter++ )\n {\n /* apply the stencil operator */\n //#pragma acc parallel loop collapse(2) independent present(other,data)\n for( unsigned int i = 1; i < (nRows-1); i++ )\n {\n for( unsigned int j = 1; j < (nPaddedCols-1); j++ )\n {\n double oldCenterValue = dval(data, nPaddedCols, i, j);\n double oldNSEWValues = dval(data, nPaddedCols, i - 1, j ) +\n dval(data, nPaddedCols, i + 1, j ) +\n dval(data, nPaddedCols, i, j - 1 ) +\n dval(data, nPaddedCols, i, j + 1 );\n double oldDiagonalValues = dval(data, nPaddedCols, i - 1, j - 1) +\n dval(data, nPaddedCols, i - 1, j + 1) +\n dval(data, nPaddedCols, i + 1, j - 1) +\n dval(data, nPaddedCols, i + 1, j + 1);\n\n double newVal = wCenter * oldCenterValue +\n wCardinal * oldNSEWValues +\n wDiagonal * oldDiagonalValues;\n dval(other, nPaddedCols, i, j ) = newVal;\n }\n } #pragma omp parallel for 
shared(nRows, nPaddedCols, other, data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for shared(nRows, nCols, nPaddedCols, other, data)", "context_chars": 100, "text": " to do this successfully\n * within OpenACC parallel region.\n */\n #pragma acc parallel loop collapse(2) independent present(other,data)\n for( unsigned int i = 1; i < (nRows - 1); i++ )\n {\n\t //#pragma omp parallel for\n\t //#pragma acc loop independent\n for( unsigned int j = 1; j < (nCols - 1); j++ )\n {\n dval(data, nPaddedCols, i, j) = dval(other, nPaddedCols, i, j);\n }\n } #pragma omp parallel for shared(nRows, nCols, nPaddedCols, other, data)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/stencil2d/StencilFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "sent(other,data)\n for( unsigned int i = 1; i < (nRows - 1); i++ )\n {\n\t ////#pragma acc loop independent\n for( unsigned int j = 1; j < (nCols - 1); j++ )\n {\n dval(data, nPaddedCols, i, j) = dval(other, nPaddedCols, i, j);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/md/MDFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "o the accelerator\n for( unsigned int iter = 0; iter < nIters; iter++ )\n {\n #pragma acc parallel loop independent\n for (int i = 0; i < nAtom; i++)\n {\n float4 ipos = position[i];\n float3 f = {0.0f, 0.0f, 0.0f};\n\t\t #pragma acc loop\n for (int j = 0; j < maxNeighbors; j++)\n {\n int jidx = neighborList[j*nAtom + i];\n float4 jpos = position[jidx];\n \n // Calculate distance\n float delx = ipos.x - jpos.x;\n float dely = ipos.y - jpos.y;\n float delz = ipos.z - jpos.z;\n float r2inv = delx*delx + dely*dely + delz*delz;\n\n // If distance is less than cutoff, calculate force\n if (r2inv < cutsq) \n {\n r2inv = 1.0f/r2inv;\n float r6inv = r2inv * r2inv * r2inv;\n float force = r2inv*r6inv*(lj1*r6inv - lj2);\n\n f.x += delx * force;\n f.y += dely * force;\n f.z += delz * force;\n }\n } // for neighbors \n force[i] = f;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/md/MDFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "o the accelerator\n for( unsigned int iter = 0; iter < nIters; iter++ )\n {\n #pragma acc parallel loop independent\n for (int i = 0; i < nAtom; i++)\n {\n double4 ipos = position[i];\n double3 f = {0.0f, 0.0f, 0.0f};\n\t\t #pragma acc loop\n for (int j = 0; j < maxNeighbors; j++)\n {\n int jidx = neighborList[j*nAtom + i];\n double4 jpos = position[jidx];\n \n // Calculate distance\n double delx = ipos.x - jpos.x;\n double dely = ipos.y - jpos.y;\n double delz = ipos.z - jpos.z;\n double r2inv = delx*delx + dely*dely + delz*delz;\n\n // If distance is less than cutoff, calculate force\n if (r2inv < cutsq) \n {\n r2inv = 1.0f/r2inv;\n double r6inv = r2inv * r2inv * r2inv;\n double force = r2inv*r6inv*(lj1*r6inv - lj2);\n\n f.x += delx * force;\n f.y += dely * 
force;\n f.z += delz * force;\n }\n } // for neighbors \n force[i] = f;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/reduction/ReduceFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) default(none) shared(nItems,idata)", "context_chars": 100, "text": " for( unsigned int iter = 0; iter < nIters; iter++ )\n {\n sum = 0.0;\n\n //#pragma acc loop reduction( +:sum ) independent \n #pragma acc kernels loop reduction( +:sum ) \n for( unsigned int i = 0; i < nItems; i++ )\n {\n sum += idata[i];\n } #pragma omp parallel for reduction(+:sum) default(none) shared(nItems,idata)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/SHOC/openacc/level1/reduction/ReduceFuncs.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum) default(none) shared(nItems, idata)", "context_chars": 100, "text": " for( unsigned int iter = 0; iter < nIters; iter++ )\n {\n sum = 0.0;\n\n //#pragma acc loop reduction( +:sum ) independent\n #pragma acc kernels loop reduction( +:sum ) \n for( unsigned int i = 0; i < nItems; i++ )\n {\n sum += idata[i];\n } #pragma omp parallel for reduction(+:sum) default(none) shared(nItems, idata)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/BT/x_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n)", "context_chars": 100, "text": "eled f) and n jacobians\n //---------------------------------------------------------------------\n for (k = 1; k <= grid_points[2]-2; k++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (i = 0; i <= isize; i++) {\n tmp1 = rho_i[k][j][i];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n //-------------------------------------------------------------------\n // \n //-------------------------------------------------------------------\n fjac[i][0][0] = 0.0;\n fjac[i][1][0] = 1.0;\n fjac[i][2][0] = 0.0;\n fjac[i][3][0] = 0.0;\n fjac[i][4][0] = 0.0;\n\n fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])\n + c2 * qs[k][j][i];\n fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );\n fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );\n fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );\n fjac[i][4][1] = c2;\n\n fjac[i][0][2] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;\n fjac[i][1][2] = u[k][j][i][2] * tmp1;\n fjac[i][2][2] = u[k][j][i][1] * tmp1;\n fjac[i][3][2] = 0.0;\n fjac[i][4][2] = 0.0;\n\n fjac[i][0][3] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;\n fjac[i][1][3] = u[k][j][i][3] * tmp1;\n fjac[i][2][3] = 0.0;\n fjac[i][3][3] = u[k][j][i][1] * tmp1;\n fjac[i][4][3] = 0.0;\n\n fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )\n * ( u[k][j][i][1] * tmp2 );\n fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1 \n - c2 * ( u[k][j][i][1]*u[k][j][i][1] * tmp2 + qs[k][j][i] );\n fjac[i][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * tmp2;\n fjac[i][3][4] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * tmp2;\n fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );\n\n njac[i][0][0] = 0.0;\n njac[i][1][0] = 0.0;\n njac[i][2][0] = 0.0;\n njac[i][3][0] = 0.0;\n njac[i][4][0] = 0.0;\n\n njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];\n njac[i][1][1] = con43 * c3c4 * tmp1;\n njac[i][2][1] = 
0.0;\n njac[i][3][1] = 0.0;\n njac[i][4][1] = 0.0;\n\n njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];\n njac[i][1][2] = 0.0;\n njac[i][2][2] = c3c4 * tmp1;\n njac[i][3][2] = 0.0;\n njac[i][4][2] = 0.0;\n\n njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];\n njac[i][1][3] = 0.0;\n njac[i][2][3] = 0.0;\n njac[i][3][3] = c3c4 * tmp1;\n njac[i][4][3] = 0.0;\n\n njac[i][0][4] = - ( con43 * c3c4\n - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])\n - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])\n - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])\n - c1345 * tmp2 * u[k][j][i][4];\n\n njac[i][1][4] = ( con43 * c3c4\n - c1345 ) * tmp2 * u[k][j][i][1];\n njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];\n njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];\n njac[i][4][4] = ( c1345 ) * tmp1;\n }\n //---------------------------------------------------------------------\n // now jacobians set, so form left hand side in x direction\n //---------------------------------------------------------------------\n lhsinit(lhs, isize);\n for (i = 1; i <= isize-1; i++) {\n tmp1 = dt * tx1;\n tmp2 = dt * tx2;\n\n lhs[i][AA][0][0] = - tmp2 * fjac[i-1][0][0]\n - tmp1 * njac[i-1][0][0]\n - tmp1 * dx1; \n lhs[i][AA][1][0] = - tmp2 * fjac[i-1][1][0]\n - tmp1 * njac[i-1][1][0];\n lhs[i][AA][2][0] = - tmp2 * fjac[i-1][2][0]\n - tmp1 * njac[i-1][2][0];\n lhs[i][AA][3][0] = - tmp2 * fjac[i-1][3][0]\n - tmp1 * njac[i-1][3][0];\n lhs[i][AA][4][0] = - tmp2 * fjac[i-1][4][0]\n - tmp1 * njac[i-1][4][0];\n\n lhs[i][AA][0][1] = - tmp2 * fjac[i-1][0][1]\n - tmp1 * njac[i-1][0][1];\n lhs[i][AA][1][1] = - tmp2 * fjac[i-1][1][1]\n - tmp1 * njac[i-1][1][1]\n - tmp1 * dx2;\n lhs[i][AA][2][1] = - tmp2 * fjac[i-1][2][1]\n - tmp1 * njac[i-1][2][1];\n lhs[i][AA][3][1] = - tmp2 * fjac[i-1][3][1]\n - tmp1 * njac[i-1][3][1];\n lhs[i][AA][4][1] = - tmp2 * fjac[i-1][4][1]\n - tmp1 * njac[i-1][4][1];\n\n lhs[i][AA][0][2] = - tmp2 * fjac[i-1][0][2]\n - tmp1 * njac[i-1][0][2];\n lhs[i][AA][1][2] = - tmp2 * fjac[i-1][1][2]\n - tmp1 * njac[i-1][1][2];\n lhs[i][AA][2][2] = - tmp2 * fjac[i-1][2][2]\n - tmp1 * njac[i-1][2][2]\n - tmp1 * dx3;\n lhs[i][AA][3][2] = - tmp2 * fjac[i-1][3][2]\n - tmp1 * njac[i-1][3][2];\n lhs[i][AA][4][2] = - tmp2 * fjac[i-1][4][2]\n - tmp1 * njac[i-1][4][2];\n\n lhs[i][AA][0][3] = - tmp2 * fjac[i-1][0][3]\n - tmp1 * njac[i-1][0][3];\n lhs[i][AA][1][3] = - tmp2 * fjac[i-1][1][3]\n - tmp1 * njac[i-1][1][3];\n lhs[i][AA][2][3] = - tmp2 * fjac[i-1][2][3]\n - tmp1 * njac[i-1][2][3];\n lhs[i][AA][3][3] = - tmp2 * fjac[i-1][3][3]\n - tmp1 * njac[i-1][3][3]\n - tmp1 * dx4;\n lhs[i][AA][4][3] = - tmp2 * fjac[i-1][4][3]\n - tmp1 * njac[i-1][4][3];\n\n lhs[i][AA][0][4] = - tmp2 * fjac[i-1][0][4]\n - tmp1 * njac[i-1][0][4];\n lhs[i][AA][1][4] = - tmp2 * fjac[i-1][1][4]\n - tmp1 * njac[i-1][1][4];\n lhs[i][AA][2][4] = - tmp2 * fjac[i-1][2][4]\n - tmp1 * njac[i-1][2][4];\n lhs[i][AA][3][4] = - tmp2 * fjac[i-1][3][4]\n - tmp1 * njac[i-1][3][4];\n lhs[i][AA][4][4] = - tmp2 * fjac[i-1][4][4]\n - tmp1 * njac[i-1][4][4]\n - tmp1 * dx5;\n\n lhs[i][BB][0][0] = 1.0\n + tmp1 * 2.0 * njac[i][0][0]\n + tmp1 * 2.0 * dx1;\n lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0];\n lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0];\n lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0];\n lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0];\n\n lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1];\n lhs[i][BB][1][1] = 1.0\n + tmp1 * 2.0 * njac[i][1][1]\n + tmp1 * 2.0 * dx2;\n lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1];\n lhs[i][BB][3][1] = tmp1 * 2.0 * 
njac[i][3][1];\n lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1];\n\n lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2];\n lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2];\n lhs[i][BB][2][2] = 1.0\n + tmp1 * 2.0 * njac[i][2][2]\n + tmp1 * 2.0 * dx3;\n lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2];\n lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2];\n\n lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3];\n lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3];\n lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3];\n lhs[i][BB][3][3] = 1.0\n + tmp1 * 2.0 * njac[i][3][3]\n + tmp1 * 2.0 * dx4;\n lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3];\n\n lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4];\n lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4];\n lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4];\n lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4];\n lhs[i][BB][4][4] = 1.0\n + tmp1 * 2.0 * njac[i][4][4]\n + tmp1 * 2.0 * dx5;\n\n lhs[i][CC][0][0] = tmp2 * fjac[i+1][0][0]\n - tmp1 * njac[i+1][0][0]\n - tmp1 * dx1;\n lhs[i][CC][1][0] = tmp2 * fjac[i+1][1][0]\n - tmp1 * njac[i+1][1][0];\n lhs[i][CC][2][0] = tmp2 * fjac[i+1][2][0]\n - tmp1 * njac[i+1][2][0];\n lhs[i][CC][3][0] = tmp2 * fjac[i+1][3][0]\n - tmp1 * njac[i+1][3][0];\n lhs[i][CC][4][0] = tmp2 * fjac[i+1][4][0]\n - tmp1 * njac[i+1][4][0];\n\n lhs[i][CC][0][1] = tmp2 * fjac[i+1][0][1]\n - tmp1 * njac[i+1][0][1];\n lhs[i][CC][1][1] = tmp2 * fjac[i+1][1][1]\n - tmp1 * njac[i+1][1][1]\n - tmp1 * dx2;\n lhs[i][CC][2][1] = tmp2 * fjac[i+1][2][1]\n - tmp1 * njac[i+1][2][1];\n lhs[i][CC][3][1] = tmp2 * fjac[i+1][3][1]\n - tmp1 * njac[i+1][3][1];\n lhs[i][CC][4][1] = tmp2 * fjac[i+1][4][1]\n - tmp1 * njac[i+1][4][1];\n\n lhs[i][CC][0][2] = tmp2 * fjac[i+1][0][2]\n - tmp1 * njac[i+1][0][2];\n lhs[i][CC][1][2] = tmp2 * fjac[i+1][1][2]\n - tmp1 * njac[i+1][1][2];\n lhs[i][CC][2][2] = tmp2 * fjac[i+1][2][2]\n - tmp1 * njac[i+1][2][2]\n - tmp1 * dx3;\n lhs[i][CC][3][2] = tmp2 * fjac[i+1][3][2]\n - tmp1 * njac[i+1][3][2];\n lhs[i][CC][4][2] = tmp2 * fjac[i+1][4][2]\n - tmp1 * njac[i+1][4][2];\n\n lhs[i][CC][0][3] = tmp2 * fjac[i+1][0][3]\n - tmp1 * njac[i+1][0][3];\n lhs[i][CC][1][3] = tmp2 * fjac[i+1][1][3]\n - tmp1 * njac[i+1][1][3];\n lhs[i][CC][2][3] = tmp2 * fjac[i+1][2][3]\n - tmp1 * njac[i+1][2][3];\n lhs[i][CC][3][3] = tmp2 * fjac[i+1][3][3]\n - tmp1 * njac[i+1][3][3]\n - tmp1 * dx4;\n lhs[i][CC][4][3] = tmp2 * fjac[i+1][4][3]\n - tmp1 * njac[i+1][4][3];\n\n lhs[i][CC][0][4] = tmp2 * fjac[i+1][0][4]\n - tmp1 * njac[i+1][0][4];\n lhs[i][CC][1][4] = tmp2 * fjac[i+1][1][4]\n - tmp1 * njac[i+1][1][4];\n lhs[i][CC][2][4] = tmp2 * fjac[i+1][2][4]\n - tmp1 * njac[i+1][2][4];\n lhs[i][CC][3][4] = tmp2 * fjac[i+1][3][4]\n - tmp1 * njac[i+1][3][4];\n lhs[i][CC][4][4] = tmp2 * fjac[i+1][4][4]\n - tmp1 * njac[i+1][4][4]\n - tmp1 * dx5;\n }\n\n //---------------------------------------------------------------------\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // performs guaussian elimination on this cell.\n // \n // assumes that unpacking routines for non-first cells \n // preload C' and rhs' from previous cell.\n // \n // assumed send happens outside this routine, but that\n // c'(IMAX) and rhs'(IMAX) will be sent to next cell\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // outer most do loops - sweeping in i direction\n 
//---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // multiply c[k][j][0] by b_inverse and copy back to c\n // multiply rhs(0) by b_inverse(0) and copy to rhs\n //---------------------------------------------------------------------\n binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] );\n\n //---------------------------------------------------------------------\n // begin inner most do loop\n // do all the elements of the cell unless last \n //---------------------------------------------------------------------\n for (i = 1; i <= isize-1; i++) {\n //-------------------------------------------------------------------\n // rhs(i) = rhs(i) - A*rhs(i-1)\n //-------------------------------------------------------------------\n matvec_sub(lhs[i][AA], rhs[k][j][i-1], rhs[k][j][i]);\n\n //-------------------------------------------------------------------\n // B(i) = B(i) - C(i-1)*A(i)\n //-------------------------------------------------------------------\n matmul_sub(lhs[i][AA], lhs[i-1][CC], lhs[i][BB]);\n\n\n //-------------------------------------------------------------------\n // multiply c[k][j][i] by b_inverse and copy back to c\n // multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs\n //-------------------------------------------------------------------\n binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] );\n }\n\n //---------------------------------------------------------------------\n // rhs(isize) = rhs(isize) - A*rhs(isize-1)\n //---------------------------------------------------------------------\n matvec_sub(lhs[isize][AA], rhs[k][j][isize-1], rhs[k][j][isize]);\n\n //---------------------------------------------------------------------\n // B(isize) = B(isize) - C(isize-1)*A(isize)\n //---------------------------------------------------------------------\n matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]);\n\n //---------------------------------------------------------------------\n // multiply rhs() by b_inverse() and copy to rhs\n //---------------------------------------------------------------------\n binvrhs( lhs[isize][BB], rhs[k][j][isize] );\n\n //---------------------------------------------------------------------\n // back solve: if last cell, then generate U(isize)=rhs(isize)\n // else assume U(isize) is loaded in un pack backsub_info\n // so just use it\n // after u(istart) will be sent to next cell\n //---------------------------------------------------------------------\n for (i = isize-1; i >=0; i--) {\n for (m = 0; m < BLOCK_SIZE; m++) {\n for (n = 0; n < BLOCK_SIZE; n++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] \n - lhs[i][CC][n][m]*rhs[k][j][i+1][n];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/BT/add.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,m)", "context_chars": 100, "text": "------------------------------\nvoid add()\n{\n int i, j, k, m;\n\n if (timeron) timer_start(t_add);\n for (k = 1; k <= grid_points[2]-2; k++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (m = 0; m < 5; m++) {\n u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,m)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/BT/z_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) shared(ksize) private(i,j,k,m,n)", "context_chars": 100, "text": "eled f) and s jacobians\n //---------------------------------------------------------------------\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 0; k <= ksize; k++) {\n tmp1 = 1.0 / u[k][j][i][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n fjac[k][0][0] = 0.0;\n fjac[k][1][0] = 0.0;\n fjac[k][2][0] = 0.0;\n fjac[k][3][0] = 1.0;\n fjac[k][4][0] = 0.0;\n\n fjac[k][0][1] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;\n fjac[k][1][1] = u[k][j][i][3] * tmp1;\n fjac[k][2][1] = 0.0;\n fjac[k][3][1] = u[k][j][i][1] * tmp1;\n fjac[k][4][1] = 0.0;\n\n fjac[k][0][2] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;\n fjac[k][1][2] = 0.0;\n fjac[k][2][2] = u[k][j][i][3] * tmp1;\n fjac[k][3][2] = u[k][j][i][2] * tmp1;\n fjac[k][4][2] = 0.0;\n\n fjac[k][0][3] = - (u[k][j][i][3]*u[k][j][i][3] * tmp2 ) \n + c2 * qs[k][j][i];\n fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1;\n fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1;\n fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1;\n fjac[k][4][3] = c2;\n\n fjac[k][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )\n * u[k][j][i][3] * tmp2;\n fjac[k][1][4] = - c2 * ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;\n fjac[k][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;\n fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 )\n - c2 * ( qs[k][j][i] + u[k][j][i][3]*u[k][j][i][3] * tmp2 );\n fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;\n\n njac[k][0][0] = 0.0;\n njac[k][1][0] = 0.0;\n njac[k][2][0] = 0.0;\n njac[k][3][0] = 0.0;\n njac[k][4][0] = 0.0;\n\n njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1];\n njac[k][1][1] = c3c4 * tmp1;\n njac[k][2][1] = 0.0;\n njac[k][3][1] = 0.0;\n njac[k][4][1] = 0.0;\n\n njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2];\n njac[k][1][2] = 0.0;\n njac[k][2][2] = c3c4 * tmp1;\n njac[k][3][2] = 0.0;\n njac[k][4][2] = 0.0;\n\n njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3];\n njac[k][1][3] = 0.0;\n njac[k][2][3] = 0.0;\n njac[k][3][3] = con43 * c3 * c4 * tmp1;\n njac[k][4][3] = 0.0;\n\n njac[k][0][4] = - ( c3c4\n - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])\n - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])\n - ( con43 * c3c4\n - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])\n - c1345 * tmp2 * u[k][j][i][4];\n\n njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];\n njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];\n njac[k][3][4] = ( con43 * c3c4\n - c1345 ) * tmp2 * u[k][j][i][3];\n njac[k][4][4] = ( c1345 )* tmp1;\n }\n\n //---------------------------------------------------------------------\n // now jacobians set, so form left hand side in z direction\n //---------------------------------------------------------------------\n lhsinit(lhs, ksize);\n for (k = 1; k <= ksize-1; k++) {\n tmp1 = dt * tz1;\n tmp2 = dt * tz2;\n\n lhs[k][AA][0][0] = - tmp2 * fjac[k-1][0][0]\n - tmp1 * njac[k-1][0][0]\n - tmp1 * dz1; \n lhs[k][AA][1][0] = - tmp2 * fjac[k-1][1][0]\n - tmp1 * njac[k-1][1][0];\n lhs[k][AA][2][0] = - tmp2 * fjac[k-1][2][0]\n - tmp1 * njac[k-1][2][0];\n lhs[k][AA][3][0] = - tmp2 * fjac[k-1][3][0]\n - tmp1 * njac[k-1][3][0];\n lhs[k][AA][4][0] = - tmp2 * fjac[k-1][4][0]\n - tmp1 * njac[k-1][4][0];\n\n lhs[k][AA][0][1] = - tmp2 * fjac[k-1][0][1]\n - tmp1 * njac[k-1][0][1];\n 
lhs[k][AA][1][1] = - tmp2 * fjac[k-1][1][1]\n - tmp1 * njac[k-1][1][1]\n - tmp1 * dz2;\n lhs[k][AA][2][1] = - tmp2 * fjac[k-1][2][1]\n - tmp1 * njac[k-1][2][1];\n lhs[k][AA][3][1] = - tmp2 * fjac[k-1][3][1]\n - tmp1 * njac[k-1][3][1];\n lhs[k][AA][4][1] = - tmp2 * fjac[k-1][4][1]\n - tmp1 * njac[k-1][4][1];\n\n lhs[k][AA][0][2] = - tmp2 * fjac[k-1][0][2]\n - tmp1 * njac[k-1][0][2];\n lhs[k][AA][1][2] = - tmp2 * fjac[k-1][1][2]\n - tmp1 * njac[k-1][1][2];\n lhs[k][AA][2][2] = - tmp2 * fjac[k-1][2][2]\n - tmp1 * njac[k-1][2][2]\n - tmp1 * dz3;\n lhs[k][AA][3][2] = - tmp2 * fjac[k-1][3][2]\n - tmp1 * njac[k-1][3][2];\n lhs[k][AA][4][2] = - tmp2 * fjac[k-1][4][2]\n - tmp1 * njac[k-1][4][2];\n\n lhs[k][AA][0][3] = - tmp2 * fjac[k-1][0][3]\n - tmp1 * njac[k-1][0][3];\n lhs[k][AA][1][3] = - tmp2 * fjac[k-1][1][3]\n - tmp1 * njac[k-1][1][3];\n lhs[k][AA][2][3] = - tmp2 * fjac[k-1][2][3]\n - tmp1 * njac[k-1][2][3];\n lhs[k][AA][3][3] = - tmp2 * fjac[k-1][3][3]\n - tmp1 * njac[k-1][3][3]\n - tmp1 * dz4;\n lhs[k][AA][4][3] = - tmp2 * fjac[k-1][4][3]\n - tmp1 * njac[k-1][4][3];\n\n lhs[k][AA][0][4] = - tmp2 * fjac[k-1][0][4]\n - tmp1 * njac[k-1][0][4];\n lhs[k][AA][1][4] = - tmp2 * fjac[k-1][1][4]\n - tmp1 * njac[k-1][1][4];\n lhs[k][AA][2][4] = - tmp2 * fjac[k-1][2][4]\n - tmp1 * njac[k-1][2][4];\n lhs[k][AA][3][4] = - tmp2 * fjac[k-1][3][4]\n - tmp1 * njac[k-1][3][4];\n lhs[k][AA][4][4] = - tmp2 * fjac[k-1][4][4]\n - tmp1 * njac[k-1][4][4]\n - tmp1 * dz5;\n\n lhs[k][BB][0][0] = 1.0\n + tmp1 * 2.0 * njac[k][0][0]\n + tmp1 * 2.0 * dz1;\n lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0];\n lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0];\n lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0];\n lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0];\n\n lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1];\n lhs[k][BB][1][1] = 1.0\n + tmp1 * 2.0 * njac[k][1][1]\n + tmp1 * 2.0 * dz2;\n lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1];\n lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1];\n lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1];\n\n lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2];\n lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2];\n lhs[k][BB][2][2] = 1.0\n + tmp1 * 2.0 * njac[k][2][2]\n + tmp1 * 2.0 * dz3;\n lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2];\n lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2];\n\n lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3];\n lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3];\n lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3];\n lhs[k][BB][3][3] = 1.0\n + tmp1 * 2.0 * njac[k][3][3]\n + tmp1 * 2.0 * dz4;\n lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3];\n\n lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4];\n lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4];\n lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4];\n lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4];\n lhs[k][BB][4][4] = 1.0\n + tmp1 * 2.0 * njac[k][4][4] \n + tmp1 * 2.0 * dz5;\n\n lhs[k][CC][0][0] = tmp2 * fjac[k+1][0][0]\n - tmp1 * njac[k+1][0][0]\n - tmp1 * dz1;\n lhs[k][CC][1][0] = tmp2 * fjac[k+1][1][0]\n - tmp1 * njac[k+1][1][0];\n lhs[k][CC][2][0] = tmp2 * fjac[k+1][2][0]\n - tmp1 * njac[k+1][2][0];\n lhs[k][CC][3][0] = tmp2 * fjac[k+1][3][0]\n - tmp1 * njac[k+1][3][0];\n lhs[k][CC][4][0] = tmp2 * fjac[k+1][4][0]\n - tmp1 * njac[k+1][4][0];\n\n lhs[k][CC][0][1] = tmp2 * fjac[k+1][0][1]\n - tmp1 * njac[k+1][0][1];\n lhs[k][CC][1][1] = tmp2 * fjac[k+1][1][1]\n - tmp1 * njac[k+1][1][1]\n - tmp1 * dz2;\n lhs[k][CC][2][1] = tmp2 * fjac[k+1][2][1]\n - tmp1 * njac[k+1][2][1];\n lhs[k][CC][3][1] = tmp2 * fjac[k+1][3][1]\n - tmp1 * njac[k+1][3][1];\n lhs[k][CC][4][1] = 
tmp2 * fjac[k+1][4][1]\n - tmp1 * njac[k+1][4][1];\n\n lhs[k][CC][0][2] = tmp2 * fjac[k+1][0][2]\n - tmp1 * njac[k+1][0][2];\n lhs[k][CC][1][2] = tmp2 * fjac[k+1][1][2]\n - tmp1 * njac[k+1][1][2];\n lhs[k][CC][2][2] = tmp2 * fjac[k+1][2][2]\n - tmp1 * njac[k+1][2][2]\n - tmp1 * dz3;\n lhs[k][CC][3][2] = tmp2 * fjac[k+1][3][2]\n - tmp1 * njac[k+1][3][2];\n lhs[k][CC][4][2] = tmp2 * fjac[k+1][4][2]\n - tmp1 * njac[k+1][4][2];\n\n lhs[k][CC][0][3] = tmp2 * fjac[k+1][0][3]\n - tmp1 * njac[k+1][0][3];\n lhs[k][CC][1][3] = tmp2 * fjac[k+1][1][3]\n - tmp1 * njac[k+1][1][3];\n lhs[k][CC][2][3] = tmp2 * fjac[k+1][2][3]\n - tmp1 * njac[k+1][2][3];\n lhs[k][CC][3][3] = tmp2 * fjac[k+1][3][3]\n - tmp1 * njac[k+1][3][3]\n - tmp1 * dz4;\n lhs[k][CC][4][3] = tmp2 * fjac[k+1][4][3]\n - tmp1 * njac[k+1][4][3];\n\n lhs[k][CC][0][4] = tmp2 * fjac[k+1][0][4]\n - tmp1 * njac[k+1][0][4];\n lhs[k][CC][1][4] = tmp2 * fjac[k+1][1][4]\n - tmp1 * njac[k+1][1][4];\n lhs[k][CC][2][4] = tmp2 * fjac[k+1][2][4]\n - tmp1 * njac[k+1][2][4];\n lhs[k][CC][3][4] = tmp2 * fjac[k+1][3][4]\n - tmp1 * njac[k+1][3][4];\n lhs[k][CC][4][4] = tmp2 * fjac[k+1][4][4]\n - tmp1 * njac[k+1][4][4]\n - tmp1 * dz5;\n }\n\n //---------------------------------------------------------------------\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // performs guaussian elimination on this cell.\n // \n // assumes that unpacking routines for non-first cells \n // preload C' and rhs' from previous cell.\n // \n // assumed send happens outside this routine, but that\n // c'(KMAX) and rhs'(KMAX) will be sent to next cell.\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // outer most do loops - sweeping in i direction\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // multiply c[0][j][i] by b_inverse and copy back to c\n // multiply rhs(0) by b_inverse(0) and copy to rhs\n //---------------------------------------------------------------------\n binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] );\n\n //---------------------------------------------------------------------\n // begin inner most do loop\n // do all the elements of the cell unless last \n //---------------------------------------------------------------------\n for (k = 1; k <= ksize-1; k++) {\n //-------------------------------------------------------------------\n // subtract A*lhs_vector(k-1) from lhs_vector(k)\n // \n // rhs(k) = rhs(k) - A*rhs(k-1)\n //-------------------------------------------------------------------\n matvec_sub(lhs[k][AA], rhs[k-1][j][i], rhs[k][j][i]);\n\n //-------------------------------------------------------------------\n // B(k) = B(k) - C(k-1)*A(k)\n // matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)\n //-------------------------------------------------------------------\n matmul_sub(lhs[k][AA], lhs[k-1][CC], lhs[k][BB]);\n\n //-------------------------------------------------------------------\n // multiply c[k][j][i] by b_inverse and copy back to c\n // multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs\n //-------------------------------------------------------------------\n binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] );\n }\n\n //---------------------------------------------------------------------\n // Now finish up special cases for last 
cell\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)\n //---------------------------------------------------------------------\n matvec_sub(lhs[ksize][AA], rhs[ksize-1][j][i], rhs[ksize][j][i]);\n\n //---------------------------------------------------------------------\n // B(ksize) = B(ksize) - C(ksize-1)*A(ksize)\n // matmul_sub(AA,i,j,ksize,c,\n // $ CC,i,j,ksize-1,c,BB,i,j,ksize)\n //---------------------------------------------------------------------\n matmul_sub(lhs[ksize][AA], lhs[ksize-1][CC], lhs[ksize][BB]);\n\n //---------------------------------------------------------------------\n // multiply rhs(ksize) by b_inverse(ksize) and copy to rhs\n //---------------------------------------------------------------------\n binvrhs( lhs[ksize][BB], rhs[ksize][j][i] );\n\n //---------------------------------------------------------------------\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // back solve: if last cell, then generate U(ksize)=rhs(ksize)\n // else assume U(ksize) is loaded in un pack backsub_info\n // so just use it\n // after u(kstart) will be sent to next cell\n //---------------------------------------------------------------------\n\n for (k = ksize-1; k >= 0; k--) {\n for (m = 0; m < BLOCK_SIZE; m++) {\n for (n = 0; n < BLOCK_SIZE; n++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] \n - lhs[k][CC][n][m]*rhs[k+1][j][i][n];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) shared(ksize) private(i,j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/BT/y_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) shared(jsize) private(i,j,k,m,n)", "context_chars": 100, "text": " n jacobians for cell c\n //---------------------------------------------------------------------\n for (k = 1; k <= grid_points[2]-2; k++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 0; j <= jsize; j++) {\n tmp1 = rho_i[k][j][i];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n fjac[j][0][0] = 0.0;\n fjac[j][1][0] = 0.0;\n fjac[j][2][0] = 1.0;\n fjac[j][3][0] = 0.0;\n fjac[j][4][0] = 0.0;\n\n fjac[j][0][1] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;\n fjac[j][1][1] = u[k][j][i][2] * tmp1;\n fjac[j][2][1] = u[k][j][i][1] * tmp1;\n fjac[j][3][1] = 0.0;\n fjac[j][4][1] = 0.0;\n\n fjac[j][0][2] = - ( u[k][j][i][2]*u[k][j][i][2]*tmp2)\n + c2 * qs[k][j][i];\n fjac[j][1][2] = - c2 * u[k][j][i][1] * tmp1;\n fjac[j][2][2] = ( 2.0 - c2 ) * u[k][j][i][2] * tmp1;\n fjac[j][3][2] = - c2 * u[k][j][i][3] * tmp1;\n fjac[j][4][2] = c2;\n\n fjac[j][0][3] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;\n fjac[j][1][3] = 0.0;\n fjac[j][2][3] = u[k][j][i][3] * tmp1;\n fjac[j][3][3] = u[k][j][i][2] * tmp1;\n fjac[j][4][3] = 0.0;\n\n fjac[j][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )\n * u[k][j][i][2] * tmp2;\n fjac[j][1][4] = - c2 * u[k][j][i][1]*u[k][j][i][2] * tmp2;\n fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1 \n - c2 * ( qs[k][j][i] + u[k][j][i][2]*u[k][j][i][2] * tmp2 );\n fjac[j][3][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;\n fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;\n\n njac[j][0][0] = 0.0;\n njac[j][1][0] = 0.0;\n njac[j][2][0] = 0.0;\n njac[j][3][0] = 0.0;\n njac[j][4][0] = 0.0;\n\n 
njac[j][0][1] = - c3c4 * tmp2 * u[k][j][i][1];\n njac[j][1][1] = c3c4 * tmp1;\n njac[j][2][1] = 0.0;\n njac[j][3][1] = 0.0;\n njac[j][4][1] = 0.0;\n\n njac[j][0][2] = - con43 * c3c4 * tmp2 * u[k][j][i][2];\n njac[j][1][2] = 0.0;\n njac[j][2][2] = con43 * c3c4 * tmp1;\n njac[j][3][2] = 0.0;\n njac[j][4][2] = 0.0;\n\n njac[j][0][3] = - c3c4 * tmp2 * u[k][j][i][3];\n njac[j][1][3] = 0.0;\n njac[j][2][3] = 0.0;\n njac[j][3][3] = c3c4 * tmp1;\n njac[j][4][3] = 0.0;\n\n njac[j][0][4] = - ( c3c4\n - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])\n - ( con43 * c3c4\n - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])\n - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])\n - c1345 * tmp2 * u[k][j][i][4];\n\n njac[j][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];\n njac[j][2][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][2];\n njac[j][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];\n njac[j][4][4] = ( c1345 ) * tmp1;\n }\n\n //---------------------------------------------------------------------\n // now joacobians set, so form left hand side in y direction\n //---------------------------------------------------------------------\n lhsinit(lhs, jsize);\n for (j = 1; j <= jsize-1; j++) {\n tmp1 = dt * ty1;\n tmp2 = dt * ty2;\n\n lhs[j][AA][0][0] = - tmp2 * fjac[j-1][0][0]\n - tmp1 * njac[j-1][0][0]\n - tmp1 * dy1; \n lhs[j][AA][1][0] = - tmp2 * fjac[j-1][1][0]\n - tmp1 * njac[j-1][1][0];\n lhs[j][AA][2][0] = - tmp2 * fjac[j-1][2][0]\n - tmp1 * njac[j-1][2][0];\n lhs[j][AA][3][0] = - tmp2 * fjac[j-1][3][0]\n - tmp1 * njac[j-1][3][0];\n lhs[j][AA][4][0] = - tmp2 * fjac[j-1][4][0]\n - tmp1 * njac[j-1][4][0];\n\n lhs[j][AA][0][1] = - tmp2 * fjac[j-1][0][1]\n - tmp1 * njac[j-1][0][1];\n lhs[j][AA][1][1] = - tmp2 * fjac[j-1][1][1]\n - tmp1 * njac[j-1][1][1]\n - tmp1 * dy2;\n lhs[j][AA][2][1] = - tmp2 * fjac[j-1][2][1]\n - tmp1 * njac[j-1][2][1];\n lhs[j][AA][3][1] = - tmp2 * fjac[j-1][3][1]\n - tmp1 * njac[j-1][3][1];\n lhs[j][AA][4][1] = - tmp2 * fjac[j-1][4][1]\n - tmp1 * njac[j-1][4][1];\n\n lhs[j][AA][0][2] = - tmp2 * fjac[j-1][0][2]\n - tmp1 * njac[j-1][0][2];\n lhs[j][AA][1][2] = - tmp2 * fjac[j-1][1][2]\n - tmp1 * njac[j-1][1][2];\n lhs[j][AA][2][2] = - tmp2 * fjac[j-1][2][2]\n - tmp1 * njac[j-1][2][2]\n - tmp1 * dy3;\n lhs[j][AA][3][2] = - tmp2 * fjac[j-1][3][2]\n - tmp1 * njac[j-1][3][2];\n lhs[j][AA][4][2] = - tmp2 * fjac[j-1][4][2]\n - tmp1 * njac[j-1][4][2];\n\n lhs[j][AA][0][3] = - tmp2 * fjac[j-1][0][3]\n - tmp1 * njac[j-1][0][3];\n lhs[j][AA][1][3] = - tmp2 * fjac[j-1][1][3]\n - tmp1 * njac[j-1][1][3];\n lhs[j][AA][2][3] = - tmp2 * fjac[j-1][2][3]\n - tmp1 * njac[j-1][2][3];\n lhs[j][AA][3][3] = - tmp2 * fjac[j-1][3][3]\n - tmp1 * njac[j-1][3][3]\n - tmp1 * dy4;\n lhs[j][AA][4][3] = - tmp2 * fjac[j-1][4][3]\n - tmp1 * njac[j-1][4][3];\n\n lhs[j][AA][0][4] = - tmp2 * fjac[j-1][0][4]\n - tmp1 * njac[j-1][0][4];\n lhs[j][AA][1][4] = - tmp2 * fjac[j-1][1][4]\n - tmp1 * njac[j-1][1][4];\n lhs[j][AA][2][4] = - tmp2 * fjac[j-1][2][4]\n - tmp1 * njac[j-1][2][4];\n lhs[j][AA][3][4] = - tmp2 * fjac[j-1][3][4]\n - tmp1 * njac[j-1][3][4];\n lhs[j][AA][4][4] = - tmp2 * fjac[j-1][4][4]\n - tmp1 * njac[j-1][4][4]\n - tmp1 * dy5;\n\n lhs[j][BB][0][0] = 1.0\n + tmp1 * 2.0 * njac[j][0][0]\n + tmp1 * 2.0 * dy1;\n lhs[j][BB][1][0] = tmp1 * 2.0 * njac[j][1][0];\n lhs[j][BB][2][0] = tmp1 * 2.0 * njac[j][2][0];\n lhs[j][BB][3][0] = tmp1 * 2.0 * njac[j][3][0];\n lhs[j][BB][4][0] = tmp1 * 2.0 * njac[j][4][0];\n\n lhs[j][BB][0][1] = tmp1 * 2.0 * njac[j][0][1];\n lhs[j][BB][1][1] = 1.0\n + tmp1 * 2.0 * 
njac[j][1][1]\n + tmp1 * 2.0 * dy2;\n lhs[j][BB][2][1] = tmp1 * 2.0 * njac[j][2][1];\n lhs[j][BB][3][1] = tmp1 * 2.0 * njac[j][3][1];\n lhs[j][BB][4][1] = tmp1 * 2.0 * njac[j][4][1];\n\n lhs[j][BB][0][2] = tmp1 * 2.0 * njac[j][0][2];\n lhs[j][BB][1][2] = tmp1 * 2.0 * njac[j][1][2];\n lhs[j][BB][2][2] = 1.0\n + tmp1 * 2.0 * njac[j][2][2]\n + tmp1 * 2.0 * dy3;\n lhs[j][BB][3][2] = tmp1 * 2.0 * njac[j][3][2];\n lhs[j][BB][4][2] = tmp1 * 2.0 * njac[j][4][2];\n\n lhs[j][BB][0][3] = tmp1 * 2.0 * njac[j][0][3];\n lhs[j][BB][1][3] = tmp1 * 2.0 * njac[j][1][3];\n lhs[j][BB][2][3] = tmp1 * 2.0 * njac[j][2][3];\n lhs[j][BB][3][3] = 1.0\n + tmp1 * 2.0 * njac[j][3][3]\n + tmp1 * 2.0 * dy4;\n lhs[j][BB][4][3] = tmp1 * 2.0 * njac[j][4][3];\n\n lhs[j][BB][0][4] = tmp1 * 2.0 * njac[j][0][4];\n lhs[j][BB][1][4] = tmp1 * 2.0 * njac[j][1][4];\n lhs[j][BB][2][4] = tmp1 * 2.0 * njac[j][2][4];\n lhs[j][BB][3][4] = tmp1 * 2.0 * njac[j][3][4];\n lhs[j][BB][4][4] = 1.0\n + tmp1 * 2.0 * njac[j][4][4] \n + tmp1 * 2.0 * dy5;\n\n lhs[j][CC][0][0] = tmp2 * fjac[j+1][0][0]\n - tmp1 * njac[j+1][0][0]\n - tmp1 * dy1;\n lhs[j][CC][1][0] = tmp2 * fjac[j+1][1][0]\n - tmp1 * njac[j+1][1][0];\n lhs[j][CC][2][0] = tmp2 * fjac[j+1][2][0]\n - tmp1 * njac[j+1][2][0];\n lhs[j][CC][3][0] = tmp2 * fjac[j+1][3][0]\n - tmp1 * njac[j+1][3][0];\n lhs[j][CC][4][0] = tmp2 * fjac[j+1][4][0]\n - tmp1 * njac[j+1][4][0];\n\n lhs[j][CC][0][1] = tmp2 * fjac[j+1][0][1]\n - tmp1 * njac[j+1][0][1];\n lhs[j][CC][1][1] = tmp2 * fjac[j+1][1][1]\n - tmp1 * njac[j+1][1][1]\n - tmp1 * dy2;\n lhs[j][CC][2][1] = tmp2 * fjac[j+1][2][1]\n - tmp1 * njac[j+1][2][1];\n lhs[j][CC][3][1] = tmp2 * fjac[j+1][3][1]\n - tmp1 * njac[j+1][3][1];\n lhs[j][CC][4][1] = tmp2 * fjac[j+1][4][1]\n - tmp1 * njac[j+1][4][1];\n\n lhs[j][CC][0][2] = tmp2 * fjac[j+1][0][2]\n - tmp1 * njac[j+1][0][2];\n lhs[j][CC][1][2] = tmp2 * fjac[j+1][1][2]\n - tmp1 * njac[j+1][1][2];\n lhs[j][CC][2][2] = tmp2 * fjac[j+1][2][2]\n - tmp1 * njac[j+1][2][2]\n - tmp1 * dy3;\n lhs[j][CC][3][2] = tmp2 * fjac[j+1][3][2]\n - tmp1 * njac[j+1][3][2];\n lhs[j][CC][4][2] = tmp2 * fjac[j+1][4][2]\n - tmp1 * njac[j+1][4][2];\n\n lhs[j][CC][0][3] = tmp2 * fjac[j+1][0][3]\n - tmp1 * njac[j+1][0][3];\n lhs[j][CC][1][3] = tmp2 * fjac[j+1][1][3]\n - tmp1 * njac[j+1][1][3];\n lhs[j][CC][2][3] = tmp2 * fjac[j+1][2][3]\n - tmp1 * njac[j+1][2][3];\n lhs[j][CC][3][3] = tmp2 * fjac[j+1][3][3]\n - tmp1 * njac[j+1][3][3]\n - tmp1 * dy4;\n lhs[j][CC][4][3] = tmp2 * fjac[j+1][4][3]\n - tmp1 * njac[j+1][4][3];\n\n lhs[j][CC][0][4] = tmp2 * fjac[j+1][0][4]\n - tmp1 * njac[j+1][0][4];\n lhs[j][CC][1][4] = tmp2 * fjac[j+1][1][4]\n - tmp1 * njac[j+1][1][4];\n lhs[j][CC][2][4] = tmp2 * fjac[j+1][2][4]\n - tmp1 * njac[j+1][2][4];\n lhs[j][CC][3][4] = tmp2 * fjac[j+1][3][4]\n - tmp1 * njac[j+1][3][4];\n lhs[j][CC][4][4] = tmp2 * fjac[j+1][4][4]\n - tmp1 * njac[j+1][4][4]\n - tmp1 * dy5;\n }\n\n //---------------------------------------------------------------------\n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // performs guaussian elimination on this cell.\n // \n // assumes that unpacking routines for non-first cells \n // preload C' and rhs' from previous cell.\n // \n // assumed send happens outside this routine, but that\n // c'(JMAX) and rhs'(JMAX) will be sent to next cell\n //---------------------------------------------------------------------\n\n 
//---------------------------------------------------------------------\n // multiply c[k][0][i] by b_inverse and copy back to c\n // multiply rhs(0) by b_inverse(0) and copy to rhs\n //---------------------------------------------------------------------\n binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][0][i] );\n\n //---------------------------------------------------------------------\n // begin inner most do loop\n // do all the elements of the cell unless last \n //---------------------------------------------------------------------\n for (j = 1; j <= jsize-1; j++) {\n //-------------------------------------------------------------------\n // subtract A*lhs_vector(j-1) from lhs_vector(j)\n // \n // rhs(j) = rhs(j) - A*rhs(j-1)\n //-------------------------------------------------------------------\n matvec_sub(lhs[j][AA], rhs[k][j-1][i], rhs[k][j][i]);\n\n //-------------------------------------------------------------------\n // B(j) = B(j) - C(j-1)*A(j)\n //-------------------------------------------------------------------\n matmul_sub(lhs[j][AA], lhs[j-1][CC], lhs[j][BB]);\n\n //-------------------------------------------------------------------\n // multiply c[k][j][i] by b_inverse and copy back to c\n // multiply rhs[k][0][i] by b_inverse[k][0][i] and copy to rhs\n //-------------------------------------------------------------------\n binvcrhs( lhs[j][BB], lhs[j][CC], rhs[k][j][i] );\n }\n\n //---------------------------------------------------------------------\n // rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)\n //---------------------------------------------------------------------\n matvec_sub(lhs[jsize][AA], rhs[k][jsize-1][i], rhs[k][jsize][i]);\n\n //---------------------------------------------------------------------\n // B(jsize) = B(jsize) - C(jsize-1)*A(jsize)\n // matmul_sub(AA,i,jsize,k,c,\n // $ CC,i,jsize-1,k,c,BB,i,jsize,k)\n //---------------------------------------------------------------------\n matmul_sub(lhs[jsize][AA], lhs[jsize-1][CC], lhs[jsize][BB]);\n\n //---------------------------------------------------------------------\n // multiply rhs(jsize) by b_inverse(jsize) and copy to rhs\n //---------------------------------------------------------------------\n binvrhs( lhs[jsize][BB], rhs[k][jsize][i] );\n\n //---------------------------------------------------------------------\n // back solve: if last cell, then generate U(jsize)=rhs(jsize)\n // else assume U(jsize) is loaded in un pack backsub_info\n // so just use it\n // after u(jstart) will be sent to next cell\n //---------------------------------------------------------------------\n for (j = jsize-1; j >= 0; j--) {\n for (m = 0; m < BLOCK_SIZE; m++) {\n for (n = 0; n < BLOCK_SIZE; n++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] \n - lhs[j][CC][n][m]*rhs[k][j+1][i][n];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) shared(jsize) private(i,j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) \\", "context_chars": 100, "text": "---------------------------------------------------\n norm_temp1 = 0.0;\n norm_temp2 = 0.0;\n reduction(+:norm_temp1,norm_temp2)\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n norm_temp1 = norm_temp1 + x[j] * z[j];\n norm_temp2 = norm_temp2 + z[j] * z[j];\n } #pragma omp parallel for default(shared) private(j) \\"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "alize z to obtain x\n //---------------------------------------------------------------------\n for (j = 0; j < lastcol - firstcol + 1; j++) { \n x[j] = norm_temp2 * z[j];\n } #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "ector to (1, 1, .... 1)\n //---------------------------------------------------------------------\n for (i = 0; i < NA+1; i++) {\n x[i] = 1.0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) \\", "context_chars": 100, "text": "---------------------------------------------------\n norm_temp1 = 0.0;\n norm_temp2 = 0.0;\n reduction(+:norm_temp1,norm_temp2)\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n norm_temp1 = norm_temp1 + x[j]*z[j];\n norm_temp2 = norm_temp2 + z[j]*z[j];\n } #pragma omp parallel for default(shared) private(j) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "alize z to obtain x\n //---------------------------------------------------------------------\n for (j = 0; j < lastcol - firstcol + 1; j++) {\n x[j] = norm_temp2 * z[j];\n } #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/LU/setiv.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,m,pxi,peta,pzeta, \\", "context_chars": 100, "text": " pzeta;\n double ue_1jk[5], ue_nx0jk[5], ue_i1k[5];\n double ue_iny0k[5], ue_ij1[5], ue_ijnz[5];\n\n xi,eta,zeta,ue_ijnz,ue_ij1,ue_iny0k,ue_i1k,ue_nx0jk,ue_1jk) \\\n shared(nx0,ny0,nz)\n for (k = 1; k < nz - 1; k++) {\n zeta = ( (double)k ) / (nz-1);\n for (j = 1; j < ny - 1; j++) {\n eta = ( (double)j ) / (ny0-1);\n for (i = 1; i < nx - 1; i++) {\n xi = ( (double)i ) / (nx0-1);\n exact(0, j, k, ue_1jk);\n exact(nx0-1, j, k, ue_nx0jk);\n exact(i, 0, k, ue_i1k);\n exact(i, ny0-1, k, ue_iny0k);\n exact(i, j, 0, ue_ij1);\n exact(i, j, nz-1, ue_ijnz);\n\n for (m = 0; m < 5; m++) {\n pxi = ( 1.0 - xi ) * ue_1jk[m]\n + xi * ue_nx0jk[m];\n peta = ( 1.0 - eta ) * ue_i1k[m]\n + eta * ue_iny0k[m];\n pzeta = ( 1.0 - zeta ) * ue_ij1[m]\n + zeta * ue_ijnz[m];\n\n u[k][j][i][m] = pxi + peta + pzeta\n - pxi * peta - peta * pzeta - pzeta * pxi\n + pxi * peta * pzeta;\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,m,pxi,peta,pzeta, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": 
"#pragma omp parallel for default(shared) private(i,j,k)", "context_chars": 100, "text": "lex (*)[d2][d1+1])ou1;\n double (*twiddle)[d2][d1+1] = (double (*)[d2][d1+1])ot;\n\n int i, j, k;\n\n for (k = 0; k < d3; k++) {\n for (j = 0; j < d2; j++) {\n for (i = 0; i < d1; i++) {\n u0[k][j][i] = dcmplx(0.0, 0.0);\n u1[k][j][i] = dcmplx(0.0, 0.0);\n twiddle[k][j][i] = 0.0;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k)", "context_chars": 100, "text": "lex (*)[d2][d1+1])ou1;\n double (*twiddle)[d2][d1+1] = (double (*)[d2][d1+1])ot;\n\n int i, j, k;\n\n for (k = 0; k < d3; k++) {\n for (j = 0; j < d2; j++) {\n for (i = 0; i < d1; i++) {\n u0[k][j][i] = dcmplx_mul2(u0[k][j][i], twiddle[k][j][i]);\n u1[k][j][i] = u0[k][j][i];\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(k,j,x0)", "context_chars": 100, "text": "n one square at a time.\n //---------------------------------------------------------------------\n for (k = 0; k < dims[2]; k++) {\n x0 = starts[k];\n for (j = 0; j < dims[1]; j++) {\n vranlc(2*NX, &x0, A, (double *)&u0[k][j][0]);\n }\n } #pragma omp parallel for default(shared) private(k,j,x0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,kk,kk2,jj,kj2,ii)", "context_chars": 100, "text": "----------------------------------------------------------------\n\n ap = -4.0 * ALPHA * PI * PI;\n\n for (k = 0; k < dims[2]; k++) {\n kk = ((k + NZ/2) % NZ) - NZ/2;\n kk2 = kk*kk;\n for (j = 0; j < dims[1]; j++) {\n jj = ((j + NY/2) % NY) - NY/2;\n kj2 = jj*jj + kk2;\n for (i = 0; i < dims[0]; i++) {\n ii = ((i + NX/2) % NX) - NX/2;\n twiddle[k][j][i] = exp(ap * (double)(ii*ii+kj2));\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,kk,kk2,jj,kj2,ii)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,jj)", "context_chars": 100, "text": " int logd1;\n int i, j, k, jj;\n\n logd1 = ilog2(d1);\n\n if (timers_enabled) timer_start(T_fftx);\n for (k = 0; k < d3; k++) {\n for (jj = 0; jj <= d2 - fftblock; jj += fftblock) {\n for (j = 0; j < fftblock; j++) {\n for (i = 0; i < d1; i++) {\n ty1[i][j] = x[k][j+jj][i];\n }\n }\n\n cfftz(is, logd1, d1, ty1, ty2);\n\n for (j = 0; j < fftblock; j++) {\n for (i = 0; i < d1; i++) {\n xout[k][j+jj][i] = ty1[i][j];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,jj)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,ii)", "context_chars": 100, "text": " int logd2;\n int i, j, k, ii;\n\n logd2 = 
ilog2(d2);\n\n if (timers_enabled) timer_start(T_ffty);\n for (k = 0; k < d3; k++) {\n for (ii = 0; ii <= d1 - fftblock; ii += fftblock) {\n for (j = 0; j < d2; j++) {\n for (i = 0; i < fftblock; i++) {\n ty1[j][i] = x[k][j][i+ii];\n }\n }\n\n cfftz(is, logd2, d2, ty1, ty2);\n\n for (j = 0; j < d2; j++) {\n for (i = 0; i < fftblock; i++) {\n xout[k][j][i+ii] = ty1[j][i];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,ii)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,ii)", "context_chars": 100, "text": " int logd3;\n int i, j, k, ii;\n\n logd3 = ilog2(d3);\n\n if (timers_enabled) timer_start(T_fftz);\n for (j = 0; j < d2; j++) {\n for (ii = 0; ii <= d1 - fftblock; ii += fftblock) {\n for (k = 0; k < d3; k++) {\n for (i = 0; i < fftblock; i++) {\n ty1[k][i] = x[k][j][i+ii];\n }\n }\n\n cfftz(is, logd3, d3, ty1, ty2);\n\n for (k = 0; k < d3; k++) {\n for (i = 0; i < fftblock; i++) {\n xout[k][j][i+ii] = ty1[k][i];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,ii)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "--------------------------------------------------\nvoid reciprocal(double a[], int n)\n{\n int i;\n\n for (i = 0; i < n; i++) {\n a[i] = 1.0/a[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "-----------------------------------\nvoid r_init_omp(double a[], int n, double _const)\n{\n int i;\n\n for (i = 0; i < n; i++) {\n a[i] = _const;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "----------------------------------------\nvoid nr_init_omp(int a[], int n, int _const)\n{\n int i;\n\n for (i = 0; i < n; i++) {\n a[i] = _const;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "---------------------------------\nvoid l_init_omp(logical a[], int n, logical _const)\n{\n int i;\n\n for (i = 0; i < n; i++) {\n a[i] = _const;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "-------------------------------\nvoid adds2m1(double a[], double b[], double c1, 
int n)\n{\n int i;\n for (i = 0; i < n; i++) {\n a[i] = a[i]+c1*b[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "-------------------------------\nvoid adds1m1(double a[], double b[], double c1, int n)\n{\n int i;\n for (i = 0; i < n; i++) {\n a[i] = c1*a[i]+b[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "--------------------------------------------\nvoid col2(double a[], double b[], int n)\n{\n int i;\n\n for (i = 0; i < n; i++) {\n a[i] = a[i]*b[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "---------------------------------------------\nvoid add2(double a[], double b[], int n)\n{\n int i;\n for (i = 0; i < n; i++) {\n a[i] = a[i]+b[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,isize,ieltotal,iel) \\", "context_chars": 100, "text": "------\ndouble calc_norm()\n{\n double total, ieltotal;\n int iel, k, j, i, isize;\n\n total = 0.0;\n\n reduction(+:total)\n for (iel = 0; iel < nelt; iel++) {\n ieltotal = 0.0;\n isize = size_e[iel];\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n ieltotal = ieltotal+ta1[iel][k][j][i]*w3m1[k][j][i]\n *jacm1_s[isize][k][j][i];\n }\n }\n }\n total = total+ieltotal;\n } #pragma omp parallel for default(shared) private(i,j,k,isize,ieltotal,iel) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ahead,ii,iel)", "context_chars": 100, "text": "log+1;\n } while (iel < nelt);\n\n ntemp = 1;\n for (i = 0; i < nellog; i++) {\n n1 = ntemp*2;\n for (iel = n1; iel <= nelt; iel += n1) {\n ahead = frontier[iel-ntemp-1];\n for (ii = ntemp-1; ii >= 0; ii--) {\n frontier[iel-ii-1] = frontier[iel-ii-1]+ahead;\n }\n } #pragma omp parallel for default(shared) private(ahead,ii,iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/utils.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ii)", "context_chars": 100, "text": "+1)*n1;\n ntemp1 = iel-nelt;\n if (ntemp1 < ntemp) {\n ahead = frontier[iel-ntemp-1];\n for (ii = ntemp-1; ii >= ntemp1; ii--) {\n frontier[iel-ii-1] = frontier[iel-ii-1]+ahead;\n } #pragma omp parallel for default(shared) private(ii)"} 
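As a hedged illustration only (not a dataset record): the pragma stored in each "omp_pragma_line" field would normally sit immediately before the loop captured in the "text" field, as in the norm_temp1/norm_temp2 reduction records above. The following minimal, self-contained C sketch shows that placement for a parallel-for reduction; every identifier in it is hypothetical and does not come from the extracted sources.

/* Illustrative sketch, assuming OpenMP support (compile with e.g. -fopenmp);
 * the pragma directly precedes the loop it parallelizes, and the two
 * accumulators are combined with a sum reduction across threads. */
#include <stdio.h>

int main(void)
{
    double x[1000], z[1000];
    double norm1 = 0.0, norm2 = 0.0;
    int j;

    /* hypothetical input data */
    for (j = 0; j < 1000; j++) {
        x[j] = 1.0;
        z[j] = 2.0;
    }

    #pragma omp parallel for default(shared) private(j) \
        reduction(+ : norm1, norm2)
    for (j = 0; j < 1000; j++) {
        norm1 = norm1 + x[j] * z[j];
        norm2 = norm2 + z[j] * z[j];
    }

    printf("%f %f\n", norm1, norm2);
    return 0;
}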
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/transfer_au.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "--------------------------------\n\nvoid init_locks()\n{\n int i;\n\n // initialize locks in parallel\n for (i = 0; i < 8*LELT; i++) {\n omp_init_lock(&tlock[i]);\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/transfer_au.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(il,j,ig,i,col,ije2,ije1, \\", "context_chars": 100, "text": "2, col, i, j, ig, il;\n\n // zero out tx on element boundaries\n col2(tx, (double *)tmult, ntot);\n\n ig4,ig3,ig2,ig1,nnje,il4,il3,il2,il1,iface,ie,tmp)\n for (ie = 0; ie < nelt; ie++) {\n for (iface = 0; iface < NSIDES; iface++) {\n // get the collocation point index of the four local corners on the\n // face iface of element ie\n il1 = idel[ie][iface][0][0];\n il2 = idel[ie][iface][0][LX1-1];\n il3 = idel[ie][iface][LX1-1][0];\n il4 = idel[ie][iface][LX1-1][LX1-1];\n\n // get the mortar indices of the four local corners\n ig1 = idmo[ie][iface][0][0][0][0];\n ig2 = idmo[ie][iface][1][0][0][LX1-1];\n ig3 = idmo[ie][iface][0][1][LX1-1][0];\n ig4 = idmo[ie][iface][1][1][LX1-1][LX1-1];\n\n // copy the value from tmor to tx for these four local corners\n tx[il1] = tmor[ig1];\n tx[il2] = tmor[ig2];\n tx[il3] = tmor[ig3];\n tx[il4] = tmor[ig4];\n\n // nnje=1 for conforming faces, nnje=2 for nonconforming faces\n if (cbc[ie][iface] == 3) {\n nnje = 2;\n } else {\n nnje = 1;\n }\n\n // for nonconforming faces\n if (nnje == 2) {\n // nonconforming faces have four pieces of mortar, first map them to\n // two intermediate mortars, stored in tmp\n r_init((double *)tmp, LX1*LX1*2, 0.0);\n\n for (ije1 = 0; ije1 < nnje; ije1++) {\n for (ije2 = 0; ije2 < nnje; ije2++) {\n for (col = 0; col < LX1; col++) {\n // in each row col, when coloumn i=1 or LX1, the value\n // in tmor is copied to tmp\n i = v_end[ije2];\n ig = idmo[ie][iface][ije2][ije1][col][i];\n tmp[ije1][col][i] = tmor[ig];\n\n // in each row col, value in the interior three collocation\n // points is computed by apply mapping matrix qbnew to tmor\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][col][i];\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije2][ije1][col][j];\n tmp[ije1][col][i] = tmp[ije1][col][i] +\n qbnew[ije2][j][i-1]*tmor[ig];\n }\n }\n }\n }\n }\n\n // mapping from two pieces of intermediate mortar tmp to element\n // face tx\n for (ije1 = 0; ije1 < nnje; ije1++) {\n // the first column, col=0, is an edge of face iface.\n // the value on the three interior collocation points, tx, is\n // computed by applying mapping matrices qbnew to tmp.\n // the mapping result is divided by 2, because there will be\n // duplicated contribution from another face sharing this edge.\n col = 0;\n for (i = 1; i < LX1-1; i++) {\n il= idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*\n tmp[ije1][j][col]*0.5;\n }\n }\n\n // for column 1 ~ lx-2\n for (col = 1; col < LX1-1; col++) {\n //when i=0 or LX1-1, the collocation points are also on an edge of\n // the face, so the mapping result also needs to be divided by 2\n i = v_end[ije1];\n il = idel[ie][iface][i][col];\n tx[il] = 
tx[il]+tmp[ije1][i][col]*0.5;\n\n // compute the value at interior collocation points in\n // columns 1 ~ LX1-1\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]* tmp[ije1][j][col];\n }\n }\n }\n\n // same as col=0\n col = LX1-1;\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*\n tmp[ije1][j][col]*0.5;\n }\n }\n }\n\n // for conforming faces\n } else {\n // face interior\n for (col = 1; col < LX1-1; col++) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][col][i];\n ig = idmo[ie][iface][0][0][col][i];\n tx[il] = tmor[ig];\n }\n }\n\n // edges of conforming faces\n\n // if local edge 0 is a nonconforming edge\n if (idmo[ie][iface][0][0][0][LX1-1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][0][i];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije1][0][0][j];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 0 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][0][i];\n ig = idmo[ie][iface][0][0][0][i];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 1 is a nonconforming edge\n if (idmo[ie][iface][1][0][1][LX1-1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][LX1-1];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][1][ije1][j][LX1-1];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 1 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][LX1-1];\n ig = idmo[ie][iface][0][0][i][LX1-1];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 2 is a nonconforming edge\n if (idmo[ie][iface][0][1][LX1-1][1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][LX1-1][i];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije1][1][LX1-1][j];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 2 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][LX1-1][i];\n ig = idmo[ie][iface][0][0][LX1-1][i];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 3 is a nonconforming edge\n if (idmo[ie][iface][0][0][LX1-1][0] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][0];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][0][ije1][j][0];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n // if local edge 3 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][0];\n ig = idmo[ie][iface][0][0][i][0];\n tx[il] = tmor[ig];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) private(il,j,ig,i,col,ije2,ije1, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,sumcb,ij1,ij2, \\", "context_chars": 100, "text": "mortar point. \n\n // VERTICES\n count = -1;\n\n // assign mortar point indices to element vertices\n cb,cb1,cb2,ntemp,ntemp1)\n for (iel = 0; iel < nelt; iel++) {\n\n // first calculate how many new mortar indices will be generated for \n // each element.\n\n // For each element, at least one vertex (vertex 7) will be new mortar\n // point. 
All possible new mortar points will be on face 1,3 or 5. By\n // checking the type of these three faces, we are able to tell\n // how many new mortar vertex points will be generated in each element.\n\n cb = cbc[iel][5];\n cb1 = cbc[iel][3];\n cb2 = cbc[iel][1];\n\n // For different combinations of the type of these three faces,\n // we group them into 27 configurations.\n // For different face types we assign the following integers:\n // 1 for type 2 or 3\n // 2 for type 0\n // 5 for type 1\n // By summing these integers for faces 1,3 and 5, sumcb will have \n // 10 different numbers indicating 10 different combinations. \n\n sumcb = 0;\n if (cb == 2 || cb == 3) {\n sumcb = sumcb+1;\n } else if (cb == 0) {\n sumcb = sumcb+2;\n } else if (cb == 1) {\n sumcb = sumcb+5;\n }\n if (cb1 == 2 || cb1 == 3) {\n sumcb = sumcb+1;\n } else if (cb1 == 0) {\n sumcb = sumcb+2;\n } else if (cb1 == 1) {\n sumcb = sumcb+5;\n }\n if (cb2 == 2 || cb2 == 3) {\n sumcb = sumcb+1;\n } else if (cb2 == 0) {\n sumcb = sumcb+2;\n } else if (cb2 == 1) {\n sumcb = sumcb+5;\n }\n\n // compute newc[iel]\n // newc[iel] records how many new mortar indices will be generated\n // for element iel\n // vassign[iel][i] records the element vertex of the i'th new mortar \n // vertex point for element iel. e.g. vassign[iel][1]=8 means\n // the 2nd new mortar vertex point generated on element\n // iel is iel's 8th vertex.\n\n if (sumcb == 3) {\n // the three face types for face 1,3, and 5 are 2 2 2\n newc[iel] = 1;\n vassign[iel][0] = 7;\n\n } else if (sumcb == 4) {\n // the three face types for face 1,3 and 5 are 2 2 0 (not \n // necessarily in this order)\n newc[iel] = 2;\n if (cb == 0) {\n vassign[iel][0] = 3;\n } else if (cb1 == 0) {\n vassign[iel][0] = 5;\n } else if (cb2 == 0) {\n vassign[iel][0] = 6;\n }\n vassign[iel][1] = 7;\n\n } else if (sumcb == 7) {\n // the three face types for face 1,3 and 5 are 2 2 1 (not \n // necessarily in this order)\n if (cb == 1) {\n ij1 = ijel[iel][5][0];\n ij2 = ijel[iel][5][1];\n if (ij1 == 0 && ij2 == 0) {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n } else if (ij1 == 0 && ij2 == 1) {\n ntemp = sje[iel][5][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n }\n } else if (ij1 == 1 && ij2 == 0) {\n ntemp = sje[iel][5][0][0];\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n }\n } else {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n }\n } else if (cb1 == 1) {\n ij1 = ijel[iel][3][0];\n ij2 = ijel[iel][3][1];\n if (ij1 == 0 && ij2 == 0) {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n } else if (ij1 == 0 && ij2 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n }\n } else if (ij1 == 1 && ij2 == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n }\n } else {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n }\n\n } else if (cb2 == 1) {\n ij1 = ijel[iel][1][0];\n ij2 = ijel[iel][1][1];\n if (ij1 == 0 && ij2 == 0) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else if (ij1 == 0 && ij2 == 1) 
{\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n }\n\n } else if (ij1 == 1 && ij2 == 0) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n }\n } else {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n }\n }\n\n } else if (sumcb == 5) {\n // the three face types for face 1,3 and 5 are 2/3 0 0 (not \n // necessarily in this order)\n newc[iel] = 4;\n if (cb == 2 || cb == 3) {\n vassign[iel][0] = 4;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else if (cb1 == 2 || cb1 == 3) {\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else if (cb2 == 2 || cb2 == 3) {\n vassign[iel][0] = 1;\n vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 7;\n }\n\n } else if (sumcb == 8) {\n // the three face types for face 1,3 and 5 are 2 0 1 (not \n // necessarily in this order)\n\n // if face 2 of type 1\n if (cb == 1) {\n if (cb1 == 2 || cb1 == 3) {\n ij1 = ijel[iel][5][0];\n if (ij1 == 0) {\n newc[iel] = 4;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else {\n ntemp = sje[iel][5][0][0];\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n\n } else if (cb2 == 2 || cb2 == 3) {\n if (ijel[iel][5][1] == 0) {\n newc[iel] = 4;\n vassign[iel][0] = 1;\n vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 7;\n } else {\n ntemp = sje[iel][5][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 5;\n vassign[iel][2] = 7;\n }\n }\n }\n\n // if face 4 of type 1\n } else if (cb1 == 1) {\n if (cb == 2 || cb == 3) {\n ij1 = ijel[iel][3][0];\n ij2 = ijel[iel][3][1];\n\n if (ij1 == 0 && ij2 == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel) {\n newc[iel] = 3;\n vassign[iel][0] = 5;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n } else {\n newc[iel] = 4;\n vassign[iel][0] = 4;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n }\n } else if (ij1 == 0 && ij2 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n newc[iel] = 3;\n vassign[iel][0] = 4;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n } else {\n newc[iel] = 4;\n vassign[iel][0] = 4;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n }\n } else if (ij1 == 1 && ij2 == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 5;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n } else if (ij1 == 1 && ij2 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 4;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n } else {\n if (ijel[iel][3][1] == 0) {\n newc[iel] = 4;\n vassign[iel][0] = 1;\n 
vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 7;\n } else {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 5;\n vassign[iel][2] = 7;\n }\n }\n }\n // if face 6 of type 1\n } else if (cb2 == 1) {\n if (cb == 2 || cb == 3) {\n if (ijel[iel][1][0] == 0) {\n newc[iel] = 4;\n vassign[iel][0] = 4;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 5;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n } else { \n if (ijel[iel][1][1] == 0) {\n newc[iel] = 4;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n }\n }\n\n } else if (sumcb == 11) {\n // the three face type for face 2,4 and 6 are 2 1 1(not \n // necessarily in this order)\n if (cb == 2 || cb == 3) {\n if (ijel[iel][3][0] == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel) {\n newc[iel] = 3;\n vassign[iel][0] = 5;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n } else {\n newc[iel] = 4;\n vassign[iel][0] = 4;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n }\n\n // if ijel[iel][3][0]=1\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] < iel) {\n ntemp1 = sje[iel][3][0][0];\n if (cbc[ntemp1][4] == 3 && sje[ntemp1][4][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n }\n } else {\n ntemp1 = sje[iel][3][0][0];\n if (cbc[ntemp1][4] == 3 && sje[ntemp1][4][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 5;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n }\n } else if (cb1 == 2 || cb1 == 3) {\n if (ijel[iel][1][1] == 0) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel) {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n } else {\n newc[iel] = 4;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n }\n // if ijel[iel][1][1]=1\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] < iel) {\n ntemp1 = sje[iel][5][0][0];\n if (cbc[ntemp1][2] == 3 && sje[ntemp1][2][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n }\n } else {\n ntemp1 = sje[iel][5][0][0];\n if (cbc[ntemp1][2] == 3 && sje[ntemp1][2][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 6;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 6;\n vassign[iel][2] = 7;\n }\n }\n }\n } else if (cb2 == 2 || cb2 == 3) {\n if (ijel[iel][5][1] == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel) {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 5;\n vassign[iel][2] = 7;\n } else {\n newc[iel] = 4;\n 
vassign[iel][0] = 1;\n vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 7;\n }\n // if ijel[iel][5][1]=1\n } else {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] < iel) {\n ntemp1 = sje[iel][5][0][0];\n if (cbc[ntemp1][0] == 3 && sje[ntemp1][0][0][0] < iel) {\n newc[iel] = 1;\n vassign[iel][0] = 7;\n } else {\n newc[iel] = 2;\n vassign[iel][0] = 3;\n vassign[iel][1] = 7;\n }\n } else {\n ntemp1 = sje[iel][5][0][0];\n if (cbc[ntemp1][0] == 3 && sje[ntemp1][0][0][0] < iel) {\n newc[iel] = 2;\n vassign[iel][0] = 5;\n vassign[iel][1] = 7;\n } else {\n newc[iel] = 3;\n vassign[iel][0] = 3;\n vassign[iel][1] = 5;\n vassign[iel][2] = 7;\n }\n }\n }\n }\n\n } else if (sumcb == 6) {\n // the three face type for face 1,3 and 5 are 0 0 0(not \n // necessarily in this order)\n newc[iel] = 8;\n vassign[iel][0] = 0;\n vassign[iel][1] = 1;\n vassign[iel][2] = 2;\n vassign[iel][3] = 3;\n vassign[iel][4] = 4;\n vassign[iel][5] = 5;\n vassign[iel][6] = 6;\n vassign[iel][7] = 7;\n\n } else if (sumcb == 9) {\n // the three face type for face 1,3 and 5 are 0 0 1(not \n // necessarily in this order)\n newc[iel] = 7;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 4;\n vassign[iel][4] = 5;\n vassign[iel][5] = 6;\n vassign[iel][6] = 7;\n\n } else if (sumcb == 12) {\n // the three face type for face 1,3 and 5 are 0 1 1(not \n // necessarily in this order)\n if (cb == 0) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][3] == 3 && sje[ntemp][3][0][0] < iel) {\n newc[iel] = 6;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 5;\n vassign[iel][4] = 6;\n vassign[iel][5] = 7;\n } else {\n newc[iel] = 7;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 4;\n vassign[iel][4] = 5;\n vassign[iel][5] = 6;\n vassign[iel][6] = 7;\n }\n } else if (cb1 == 0) {\n newc[iel] = 7;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 4;\n vassign[iel][4] = 5;\n vassign[iel][5] = 6;\n vassign[iel][6] = 7;\n } else if (cb2 == 0) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel) {\n newc[iel] = 6;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 4;\n vassign[iel][3] = 5;\n vassign[iel][4] = 6;\n vassign[iel][5] = 7;\n } else {\n newc[iel] = 7;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 4;\n vassign[iel][4] = 5;\n vassign[iel][5] = 6;\n vassign[iel][6] = 7;\n }\n }\n\n } else if (sumcb == 15) {\n // the three face type for face 1,3 and 5 are 1 1 1(not \n // necessarily in this order)\n ntemp = sje[iel][3][0][0];\n ntemp1 = sje[iel][1][0][0];\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] < iel) {\n if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel) {\n if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel) {\n newc[iel] = 4;\n vassign[iel][0] = 3;\n vassign[iel][1] = 5;\n vassign[iel][2] = 6;\n vassign[iel][3] = 7;\n } else {\n newc[iel] = 5;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 6;\n vassign[iel][4] = 7;\n }\n } else {\n if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel) {\n newc[iel] = 5;\n vassign[iel][0] = 3;\n vassign[iel][1] = 4;\n vassign[iel][2] = 5;\n vassign[iel][3] = 6;\n vassign[iel][4] = 7;\n } else {\n newc[iel] = 6;\n vassign[iel][0] = 2;\n vassign[iel][1] = 3;\n vassign[iel][2] = 4;\n vassign[iel][3] = 5;\n vassign[iel][4] = 6;\n vassign[iel][5] = 7;\n }\n }\n } else {\n 
if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] < iel) {\n if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel) {\n newc[iel] = 5;\n vassign[iel][0] = 1;\n vassign[iel][1] = 3;\n vassign[iel][2] = 5;\n vassign[iel][3] = 6;\n vassign[iel][4] = 7;\n } else {\n newc[iel] = 6;\n vassign[iel][0] = 1;\n vassign[iel][1] = 2;\n vassign[iel][2] = 3;\n vassign[iel][3] = 5;\n vassign[iel][4] = 6;\n vassign[iel][5] = 7;\n }\n } else {\n if (cbc[ntemp1][5] == 3 && sje[ntemp1][5][0][0] < iel) {\n newc[iel] = 6;\n vassign[iel][0] = 1;\n vassign[iel][1] = 3;\n vassign[iel][2] = 4;\n vassign[iel][3] = 5;\n vassign[iel][4] = 6;\n vassign[iel][5] = 7;\n\n } else {\n newc[iel] = 7;\n vassign[iel][0] = 1; \n vassign[iel][1] = 2; \n vassign[iel][2] = 3; \n vassign[iel][3] = 4;\n vassign[iel][4] = 5;\n vassign[iel][5] = 6;\n vassign[iel][6] = 7;\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) private(iel,sumcb,ij1,ij2, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel)", "context_chars": 100, "text": "indices are to be generated from element 0 to iel.\n // front[iel]=newc[0]+newc[1]+...+newc[iel]\n\n for (iel = 0; iel < nelt; iel++) {\n front[iel] = newc[iel];\n } #pragma omp parallel for default(shared) private(iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,i,count)", "context_chars": 100, "text": "tar point index of it will only\n // be generated on the element with the lowest element index. 
\n\n for (iel = 0; iel < nelt; iel++) {\n // compute the starting vertex mortar point index in element iel\n front[iel] = front[iel]-newc[iel];\n\n for (i = 0; i < newc[iel]; i++) {\n // count is the new mortar index number, which will be assigned\n // to a vertex of iel and broadcast to all other elements sharing\n // this vertex point.\n count = front[iel]+i;\n mortar_vertex(vassign[iel][i], iel, count);\n }\n } #pragma omp parallel for default(shared) private(iel,i,count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,cb1,cb2,cb3,cb4,cb5 \\", "context_chars": 100, "text": "of iel is smaller than \n // that of its neighbor connected, neighbored by edge n only\n ,cb6,ntemp)\n for (iel = 0; iel < nelt; iel++) {\n newc[iel] = 0;\n newe[iel] = 0;\n newi[iel] = 0;\n cb1 = cbc[iel][0];\n cb2 = cbc[iel][1];\n cb3 = cbc[iel][2];\n cb4 = cbc[iel][3];\n cb5 = cbc[iel][4];\n cb6 = cbc[iel][5];\n\n // on face 6\n\n if (cb6 == 0) {\n if (cb4 == 0 || cb4 == 1) {\n // if face 6 is of type 0 and face 4 is of type 0 or type 1, the edge\n // shared by face 4 and 6 (edge 10) will generate new mortar point\n // indices.\n newe[iel] = newe[iel]+1;\n eassign[iel][10] = true;\n }\n if (cb1 != 3) {\n // if face 1 is of type 3, the edge shared by face 6 and 1 (edge 0)\n // will generate new mortar points indices.\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n }\n if (cb3 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n }\n if (cb2 == 0 || cb2 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][4] = true;\n }\n } else if (cb6 == 1) {\n if (cb4 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][10] = true;\n } else if (cb4 == 1) {\n\n // If face 6 and face 4 both are of type 1, ntemp is the neighbor\n // element on face 4.\n ntemp = sje[iel][3][0][0];\n\n // if ntemp's face 6 is not noncoforming or the neighbor element\n // of ntemp on face 6 has an element index larger than iel, the \n // edge shared by face 6 and 4 (edge 10) will generate new mortar\n // point indices.\n if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel) {\n\n newe[iel] = newe[iel]+1;\n eassign[iel][10] = true;\n // if the face 6 of ntemp is of type 2\n if (cbc[ntemp][5] == 2) {\n // The neighbor element of iel, neighbored by edge 10, is \n // sje[ntemp][5][0][0] (the neighbor element of ntemp on ntemp's\n // face 6).\n diagn[iel][10][0] = sje[ntemp][5][0][0];\n // The neighbor element of iel, neighbored by edge 10 shares\n // the ijel[iel][5][1] part of edge 10 of iel\n diagn[iel][10][1] = ijel[iel][5][1];\n // edge 9 of element sje[ntemp][5][0][0] (the neighbor element of \n // ntemp on ntemp's face 6) is a nonconforming edge\n ncon_edge[sje[ntemp][5][0][0]][9] = true;\n // if_1_edge[iel][n]=true indicates that iel is of a smaller\n //size than its neighbor element, neighbored by edge n of iel only.\n if_1_edge[iel][10] = true;\n }\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel) {\n diagn[iel][10][0] = sje[ntemp][5][ijel[iel][5][1]][1];\n }\n }\n }\n\n if (cb1 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n } else if (cb1 == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n if (cbc[ntemp][5] == 2) {\n diagn[iel][0][0] = sje[ntemp][5][0][0];\n diagn[iel][0][1] = ijel[iel][5][0];\n 
ncon_edge[sje[ntemp][5][0][0]][6] = true;\n if_1_edge[iel][0] = true;\n }\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel) {\n diagn[iel][0][0] = sje[ntemp][5][0][ijel[iel][5][0]];\n }\n }\n } else if (cb1 == 2) {\n if (ijel[iel][5][1] == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][5] == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n // if cbc[ntemp][5]=2\n } else {\n if (sje[ntemp][5][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n diagn[iel][0][0] = sje[ntemp][5][0][0];\n }\n }\n } else {\n newe[iel] = newe[iel]+1;\n eassign[iel][0] = true;\n }\n }\n\n if (cb3 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n } else if (cb3 == 1) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n if (cbc[ntemp][5] == 2) {\n diagn[iel][8][0] = sje[ntemp][5][0][0];\n diagn[iel][8][1] = ijel[iel][5][1];\n ncon_edge[sje[ntemp][5][0][0]][11] = true;\n if_1_edge[iel][8] = true;\n }\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel) {\n diagn[iel][8][0] = sje[ntemp][5][ijel[iel][5][1]][1];\n }\n }\n } else if (cb3 == 2) {\n if (ijel[iel][5][0] == 1) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][5] == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n // if cbc[ntemp][5]=2\n } else {\n if (sje[ntemp][5][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n diagn[iel][8][0] = sje[ntemp][5][0][0];\n }\n }\n } else {\n newe[iel] = newe[iel]+1;\n eassign[iel][8] = true;\n }\n }\n\n if (cb2 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][4] = true;\n } else if (cb2 == 1) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][5] != 3 || sje[ntemp][5][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][4] = true;\n if (cbc[ntemp][5] == 2) {\n diagn[iel][4][0] = sje[ntemp][5][0][0];\n diagn[iel][4][1] = ijel[iel][5][0];\n ncon_edge[sje[ntemp][5][0][0]][2] = true;\n if_1_edge[iel][4] = true;\n }\n if (cbc[ntemp][5] == 3 && sje[ntemp][5][0][0] > iel) {\n diagn[iel][8][0] = sje[ntemp][5][ijel[iel][5][1]][1];\n }\n }\n }\n }\n\n // one face 4\n if (cb4 == 0) {\n if (cb1 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][3] = true;\n }\n if (cb5 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][11] = true;\n }\n if (cb2 == 0 || cb2 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][7] = true;\n } \n\n } else if (cb4 == 1) {\n if (cb1 == 2) {\n if (ijel[iel][3][1] == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][3] = true;\n } else {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] != 3 || sje[ntemp][0][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][3] = true;\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] > iel) {\n diagn[iel][3][0] = sje[ntemp][0][1][ijel[iel][3][0]];\n }\n }\n }\n } else if (cb1 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][3] = true;\n } else if (cb1 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][0] != 3 || sje[ntemp][0][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][3] = true;\n if (cbc[ntemp][0] == 2) {\n diagn[iel][3][0] = sje[ntemp][0][0][0];\n diagn[iel][3][1] = ijel[iel][3][0];\n ncon_edge[sje[ntemp][0][0][0]][5] = true;\n if_1_edge[iel][3] = true;\n }\n if (cbc[ntemp][0] == 3 && sje[ntemp][0][0][0] > iel) {\n diagn[iel][3][0] = sje[ntemp][0][1][ijel[iel][3][0]];\n }\n }\n }\n if (cb5 == 2) {\n if (ijel[iel][3][0] == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][11] = true;\n } else {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel) {\n newe[iel] = 
newe[iel]+1;\n eassign[iel][11] = true;\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel) {\n diagn[iel][11][0] = sje[ntemp][4][ijel[iel][3][1]][1];\n }\n }\n }\n } else if (cb5 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][11] = true;\n } else if (cb5 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][11] = true;\n if (cbc[ntemp][4] == 2) {\n diagn[iel][11][0] = sje[ntemp][4][0][0];\n diagn[iel][11][1] = ijel[iel][3][1];\n ncon_edge[sje[ntemp][4][0][0]][8] = true;\n if_1_edge[iel][11] = true;\n }\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel) {\n diagn[iel][11][0] = sje[ntemp][4][ijel[iel][3][1]][1];\n }\n }\n }\n if (cb2 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][7] = true;\n } else if (cb2 == 1) {\n ntemp = sje[iel][3][0][0];\n if (cbc[ntemp][1] != 3 || sje[ntemp][1][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][7] = true;\n if (cbc[ntemp][1] == 2) {\n diagn[iel][7][0] = sje[ntemp][1][0][0];\n diagn[iel][7][1] = ijel[iel][3][0];\n ncon_edge[sje[ntemp][1][0][0]][1] = true;\n if_1_edge[iel][7] = true;\n }\n if (cbc[ntemp][1] == 3 && sje[ntemp][1][0][0] > iel) {\n diagn[iel][7][0] = sje[ntemp][2][1][ijel[iel][3][0]];\n }\n }\n }\n }\n\n // on face 2\n if (cb2 == 0) {\n if (cb3 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][5] = true;\n }\n if (cb5 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][6] = true;\n }\n } else if (cb2 == 1) {\n if (cb3 == 2) {\n if (ijel[iel][1][1] == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][5] = true;\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][2] != 3 || sje[ntemp][2][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][5] = true;\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] > iel) {\n diagn[iel][5][0] = sje[ntemp][2][1][ijel[iel][1][0]];\n }\n }\n }\n } else if (cb3 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][5] = true;\n } else if (cb3 == 1) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][2] != 3 || sje[ntemp][2][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][5] = true;\n if (cbc[ntemp][2] == 2) {\n diagn[iel][5][0] = sje[ntemp][2][0][0];\n diagn[iel][5][1] = ijel[iel][1][0];\n ncon_edge[sje[ntemp][2][0][0]][3] = true;\n if_1_edge[iel][5] = true;\n }\n if (cbc[ntemp][2] == 3 && sje[ntemp][2][0][0] > iel) {\n diagn[iel][5][0] = sje[ntemp][2][1][ijel[iel][3][0]];\n }\n }\n }\n if (cb5 == 2) {\n if (ijel[iel][1][0] == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][6] = true;\n } else {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][6] = true;\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel) {\n diagn[iel][6][0] = sje[ntemp][4][1][ijel[iel][1][1]];\n }\n }\n }\n } else if (cb5 == 0) {\n newe[iel] = newe[iel]+1;\n eassign[iel][6] = true;\n } else if (cb5 == 1) {\n ntemp = sje[iel][1][0][0];\n if (cbc[ntemp][4] != 3 || sje[ntemp][4][0][0] > iel) {\n newe[iel] = newe[iel]+1;\n eassign[iel][6] = true;\n if (cbc[ntemp][4] == 2) {\n diagn[iel][6][0] = sje[ntemp][4][0][0];\n diagn[iel][6][1] = ijel[iel][1][1];\n ncon_edge[sje[ntemp][4][0][0]][0] = true;\n if_1_edge[iel][6] = true;\n }\n if (cbc[ntemp][4] == 3 && sje[ntemp][4][0][0] > iel) {\n diagn[iel][6][0] = sje[ntemp][4][ijel[iel][3][1]][1];\n }\n }\n }\n }\n\n // on face 1\n if (cb1 == 1) {\n newe[iel] = newe[iel]+2;\n eassign[iel][1] = true;\n if (cb3 == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][2] == 2) {\n diagn[iel][1][0] = sje[ntemp][2][0][0];\n 
diagn[iel][1][1] = ijel[iel][0][0];\n ncon_edge[sje[ntemp][2][0][0]][7] = true;\n if_1_edge[iel][1] = true;\n } else if (cbc[ntemp][2] == 3) {\n diagn[iel][1][0] = sje[ntemp][2][0][ijel[iel][0][0]];\n }\n } else if (cb3 == 2) {\n ntemp = sje[iel][2][0][0];\n if (ijel[iel][0][1] == 1) {\n if (cbc[ntemp][0] == 2) {\n diagn[iel][1][0] = sje[ntemp][0][0][0];\n }\n }\n }\n\n eassign[iel][2] = true;\n if (cb5 == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][4] == 2) {\n diagn[iel][2][0] = sje[ntemp][4][0][0];\n diagn[iel][2][1] = ijel[iel][0][1];\n ncon_edge[sje[ntemp][4][0][0]][4] = true;\n if_1_edge[iel][2] = true;\n } else if (cbc[ntemp][4] == 3) {\n diagn[iel][2][0] = sje[ntemp][4][0][ijel[iel][0][1]];\n }\n } else if (cb5 == 2) {\n ntemp = sje[iel][4][0][0];\n if (ijel[iel][0][0] == 1) {\n if (cbc[ntemp][0] == 2) {\n diagn[iel][2][0] = sje[ntemp][0][0][0];\n }\n }\n\n }\n } else if (cb1 == 2) {\n if (cb3 == 2) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][2] != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][1] = true;\n if (cbc[ntemp][2] == 2) {\n diagn[iel][1][0] = sje[ntemp][2][0][0];\n } \n }\n } else if (cb3 == 0 || cb3 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][1] = true;\n if (cb3 == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][2] == 2) {\n diagn[iel][1][0] = sje[ntemp][2][0][0];\n }\n }\n }\n if (cb5 == 2) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][4] != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][2] = true;\n if (cbc[ntemp][4] == 2) {\n diagn[iel][2][0] = sje[ntemp][4][0][0];\n }\n }\n } else if (cb5 == 0 || cb5 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][2] = true;\n if (cb5 == 1) {\n ntemp = sje[iel][0][0][0];\n if (cbc[ntemp][4] == 2) {\n diagn[iel][2][0] = sje[ntemp][4][0][0];\n }\n }\n }\n } else if (cb1 == 0) {\n if (cb3 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][1] = true;\n }\n if (cb5 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][2] = true;\n }\n }\n\n // on face 3\n if (cb3 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][9] = true;\n if (cb5 == 1) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][4] == 2) {\n diagn[iel][9][0] = sje[ntemp][4][0][0];\n diagn[iel][9][1] = ijel[iel][2][1];\n ncon_edge[sje[ntemp][4][0][0]][10] = true;\n if_1_edge[iel][9] = true;\n }\n }\n if (ijel[iel][2][0] == 1) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][4] == 3) {\n diagn[iel][9][0] = sje[ntemp][4][ijel[iel][2][1]][0];\n }\n }\n } else if (cb3 == 2) {\n if (cb5 == 2) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][4] != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][9] = true;\n if (cbc[ntemp][4] == 2) {\n diagn[iel][9][0] = sje[ntemp][4][0][0];\n }\n }\n } else if (cb5 == 0 || cb5 == 1) {\n newe[iel] = newe[iel]+1;\n eassign[iel][9] = true;\n if (cb5 == 1) {\n ntemp = sje[iel][2][0][0];\n if (cbc[ntemp][4] == 2) {\n diagn[iel][9][0] = sje[ntemp][4][0][0];\n } \n }\n }\n } else if (cb3 == 0) {\n if (cb5 != 3) {\n newe[iel] = newe[iel]+1;\n eassign[iel][9] = true;\n }\n }\n\n // CONFORMING FACE INTERIOR\n\n // find how many new mortar point indices will be assigned\n // to face interiors on all faces on each element\n\n // newi record how many new face interior points will be assigned\n\n // on face 6\n if (cb6 == 1 || cb6 == 0) {\n newi[iel] = newi[iel]+9;\n fassign[iel][5] = true;\n }\n // on face 4\n if (cb4 == 1 || cb4 == 0) {\n newi[iel] = newi[iel]+9;\n fassign[iel][3] = true;\n }\n // on face 2\n if (cb2 == 1 || cb2 == 0) {\n newi[iel] = newi[iel]+9;\n fassign[iel][1] = true;\n }\n // on face 1\n if (cb1 != 3) {\n newi[iel] = 
newi[iel]+9;\n fassign[iel][0] = true;\n }\n // on face 3\n if (cb3 != 3) {\n newi[iel] = newi[iel]+9;\n fassign[iel][2] = true;\n }\n // on face 5\n if (cb5 != 3) {\n newi[iel] = newi[iel]+9;\n fassign[iel][4] = true;\n }\n\n // newc is the total number of new mortar point indices\n // to be assigned to each element.\n newc[iel] = newe[iel]*3+newi[iel];\n } #pragma omp parallel for default(shared) private(iel,cb1,cb2,cb3,cb4,cb5 \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel)", "context_chars": 100, "text": " conforming face interiors) from element 0 to iel.\n // front[iel]=newc[0]+newc[1]+...+newc[iel]\n\n for (iel = 0; iel < nelt; iel++) {\n front[iel] = newc[iel];\n } #pragma omp parallel for default(shared) private(iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,count,i,cb1,ne, \\", "context_chars": 100, "text": "g element face. On each face, first visit all \n // conforming edges, and then the face interior.\n space,ie,edge_g,face2,ie2,ntemp,ii,jj,jface,cb,mor_v)\n for (iel = 0; iel < nelt; iel++) {\n front[iel] = front[iel]-newc[iel];\n count = nvertex+front[iel];\n for (i = 0; i < 6; i++) {\n cb1 = cbc[iel][i];\n if (i < 2) {\n ne = 4;\n space = 1;\n } else if (i < 4) {\n ne = 3;\n space = 2;\n\n // i loops over faces. Only 4 faces need to be examed for edge visit.\n // On face 1, edge 0,1,2 and 3 will be visited. On face 2, edge 4,5,6\n // and 7 will be visited. On face 3, edge 8 and 9 will be visited and\n // on face 4, edge 10 and 11 will be visited. The 12 edges can be \n // covered by four faces, there is no need to visit edges on face\n // 5 and 6. So ne is set to be 0. \n // However, i still needs to loop over 4 and 5, since the interiors\n // of face 5 and 6 still need to be visited.\n\n } else {\n ne = 0;\n space = 1;\n }\n\n for (ie = 0; ie < ne; ie += space) {\n edge_g = edgenumber[i][ie];\n if (eassign[iel][edge_g]) {\n // generate the new mortar points index, mor_v\n mor_assign(mor_v, &count);\n // assign mor_v to local edge ie of face i on element iel\n mor_edge(ie, i, iel, mor_v);\n\n // Since this edge is shared by another face of element \n // iel, assign mor_v to the corresponding edge on the other \n // face also.\n\n // find the other face\n face2 = f_e_ef[i][ie];\n // find the local edge index of this edge on the other face\n ie2 = localedgenumber[edge_g][face2];\n // asssign mor_v to local edge ie2 of face face2 on element iel\n mor_edge(ie2, face2, iel, mor_v);\n\n // There are some neighbor elements also sharing this edge. 
Assign\n // mor_v to neighbor element, neighbored by face i.\n if (cbc[iel][i] == 2) {\n ntemp = sje[iel][i][0][0];\n mor_edge(ie, jjface[i], ntemp, mor_v);\n mor_edge(op[ie2], face2, ntemp, mor_v);\n }\n\n // assign mor_v to neighbor element neighbored by face face2\n if (cbc[iel][face2] == 2) {\n ntemp = sje[iel][face2][0][0];\n mor_edge(ie2, jjface[face2], ntemp, mor_v);\n mor_edge(op[ie], i, ntemp, mor_v);\n }\n\n // assign mor_v to neighbor element sharing this edge\n\n // if the neighbor is of the same size of iel\n if (!if_1_edge[iel][edgenumber[i][ie]]) {\n if (diagn[iel][edgenumber[i][ie]][0] != -1) {\n ntemp = diagn[iel][edgenumber[i][ie]][0];\n mor_edge(op[ie2], jjface[face2], ntemp, mor_v);\n mor_edge(op[ie], jjface[i], ntemp, mor_v);\n }\n\n // if the neighbor has a size larger than iel's\n } else {\n if (diagn[iel][edgenumber[i][ie]][0] != -1) {\n ntemp = diagn[iel][edgenumber[i][ie]][0];\n mor_ne(mor_v, diagn[iel][edgenumber[i][ie]][1], \n ie, i, ie2, face2, iel, ntemp);\n }\n }\n }\n } \n\n if (fassign[iel][i]) {\n // generate new mortar points index in face interior. \n // if face i is of type 2 or iel doesn't have a neighbor element,\n // assign new mortar point indices to interior mortar points\n // of face i of iel.\n cb = cbc[iel][i];\n if (cb == 1 || cb == 0) {\n for (jj = 1; jj < LX1-1; jj++) {\n for (ii = 1; ii < LX1-1; ii++) {\n idmo[iel][i][0][0][jj][ii] = count;\n count = count+1;\n }\n }\n\n // if face i is of type 2, assign new mortar point indices\n // to iel as well as to the neighboring element on face i\n } else if (cb == 2) {\n if (idmo[iel][i][0][0][1][1] == -1) {\n ntemp = sje[iel][i][0][0];\n jface = jjface[i];\n for (jj = 1; jj < LX1-1; jj++) {\n for (ii = 1; ii < LX1-1; ii++) {\n idmo[iel][i][0][0][jj][ii] = count;\n idmo[ntemp][jface][0][0][jj][ii] = count;\n count = count+1;\n }\n }\n } \n }\n }\n }\n } #pragma omp parallel for default(shared) private(iel,count,i,cb1,ne, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/mason.c", "omp_pragma_line": "#pragma omp parallel for default(shared) \\", "context_chars": 100, "text": "}\n }\n\n // for edges on nonconforming faces, copy the mortar points indices\n // from neighbors.\n private(iel,i,cb,jface,iii,jjj,ntemp,ii,jj)\n for (iel = 0; iel < nelt; iel++) {\n for (i = 0; i < 6; i++) {\n cb = cbc[iel][i];\n if (cb == 3) {\n // edges \n edgecopy_s(i, iel);\n } \n\n // face interior \n\n jface = jjface[i];\n if (cb == 3) {\n for (iii = 0; iii < 2; iii++) {\n for (jjj = 0; jjj < 2; jjj++) {\n ntemp = sje[iel][i][jjj][iii];\n for (jj = 0; jj < LX1; jj++) {\n for (ii = 0; ii < LX1; ii++) {\n idmo[iel][i][jjj][iii][jj][ii] =\n idmo[ntemp][jface][0][0][jj][ii];\n }\n }\n idmo[iel][i][jjj][iii][0][0] = idmo[ntemp][jface][0][0][0][0];\n idmo[iel][i][jjj][iii][0][LX1-1] = idmo[ntemp][jface][1][0][0][LX1-1];\n idmo[iel][i][jjj][iii][LX1-1][0] = idmo[ntemp][jface][0][1][LX1-1][0];\n idmo[iel][i][jjj][iii][LX1-1][LX1-1]=\n idmo[ntemp][jface][1][1][LX1-1][LX1-1];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel)", "context_chars": 100, "text": "rsen = 0;\n\n // skip[iel]=true indicates an element no longer exists (because it\n // 
got merged)\n for (iel = 0; iel < nelt; iel++) {\n skip[iel] = false;\n } #pragma omp parallel for default(shared) private(iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel,iel)", "context_chars": 100, "text": "iel) takes as argument the actual element index and returns the\n // morton index\n for (miel = 0; miel < nelt; miel++) {\n iel = mt_to_id[miel];\n id_to_mt[iel] = miel;\n } #pragma omp parallel for default(shared) private(miel,iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel,iel,ic, \\", "context_chars": 100, "text": " potential coarsening will make neighbor, \n // and neighbor's neighbor....break grid restriction\n ntp,parent,test,test1,i,test2,test3) shared(if_coarsen)\n for (miel = 0; miel < nelt; miel++) {\n ifcoa[miel] = false;\n front[miel] = 0;\n iel = mt_to_id_old[miel];\n // if an element is marked to be coarsened\n if (ich[iel] == 2) {\n\n // If the current element is the \"first\" child (front-left-\n // bottom) of its parent (tree[iel] mod 8 equals 0), then \n // find all its neighbors. Check whether they are from the same \n // parent.\n\n ic = tree[iel];\n if (!btest(ic,0) && !btest(ic,1) && !btest(ic,2)) {\n ntp[0] = iel;\n ntp[1] = sje[iel][0][0][0];\n ntp[2] = sje[iel][2][0][0];\n ntp[3] = sje[ntp[2]][0][0][0];\n ntp[4] = sje[iel][4][0][0];\n ntp[5] = sje[ntp[4]][0][0][0];\n ntp[6] = sje[ntp[4]][2][0][0];\n ntp[7] = sje[ntp[6]][0][0][0];\n\n parent = tree[iel] >> 3;\n test = false;\n\n test1 = true;\n for (i = 0; i < 8; i++) {\n if ((tree[ntp[i]] >> 3) != parent) test1 = false;\n }\n\n // check whether all child elements are marked to be coarsened\n if (test1) {\n test2 = true;\n for (i = 0; i < 8; i++) {\n if (ich[ntp[i]] != 2) test2 = false;\n }\n\n // check whether all child elements can be coarsened or not.\n if (test2) {\n test3 = true;\n for (i = 0; i < 8; i++) {\n if (!icheck(ntp[i],i)) test3 = false;\n }\n if (test3) test = true;\n }\n }\n // if the eight child elements are eligible to be coarsened\n // mark the first children ifcoa[miel]=true\n // mark them all ifcoa_id[]=true\n // front[miel] will be used to calculate (potentially in parallel) \n // how many elements with seuqnece numbers less than\n // miel will be coarsened.\n // skip[] marks that an element will no longer exist after merge.\n\n if (test) {\n ifcoa[miel] = true;\n for (i = 0; i < 8; i++) {\n ifcoa_id[ntp[i]] = true;\n }\n front[miel] = 1;\n for (i = 0; i < 7; i++) {\n skip[ntp[i+1]] = true;\n }\n if (!(*if_coarsen)) *if_coarsen = true;\n }\n } \n } \n } #pragma omp parallel for default(shared) private(miel,iel,ic, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel,iel,mielnew)", "context_chars": 100, "text": "ild) to be coarsened.\n\n // create array mt_to_id to convert actual element index to morton index\n for (miel = 0; miel < nelt; miel++) {\n iel = mt_to_id_old[miel];\n if (!skip[iel]) {\n if (ifcoa[miel]) {\n action[front[miel]-1] = miel;\n mielnew = 
miel-(front[miel]-1)*7;\n } else { \n mielnew = miel-front[miel]*7;\n }\n mt_to_id[mielnew] = iel;\n }\n } #pragma omp parallel for default(shared) private(miel,iel,mielnew)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(index,miel,iel,ntp)", "context_chars": 100, "text": "_to_id[mielnew] = iel;\n }\n }\n\n // perform the coarsening procedure (potentially in parallel)\n for (index = 0; index < num_coarsen; index++) {\n miel = action[index];\n iel = mt_to_id_old[miel];\n // find eight child elements to be coarsened\n ntp[0] = iel;\n ntp[1] = sje[iel][0][0][0];\n ntp[2] = sje[iel][2][0][0];\n ntp[3] = sje[ntp[2]][0][0][0];\n ntp[4] = sje[iel][4][0][0];\n ntp[5] = sje[ntp[4]][0][0][0];\n ntp[6] = sje[ntp[4]][2][0][0];\n ntp[7] = sje[ntp[6]][0][0][0];\n // merge them to be the parent\n merging(ntp);\n } #pragma omp parallel for default(shared) private(index,miel,iel,ntp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel)", "context_chars": 100, "text": "index, treetemp;\n int sjetemp[6][2][2], n1, n2, nelttemp;\n int cb, cbctemp[6];\n\n // initialize\n for (miel = 0; miel < nelt; miel++) {\n mt_to_id_old[miel] = mt_to_id[miel];\n mt_to_id[miel] = -1;\n action[miel] = -1;\n if (ich[mt_to_id_old[miel]] != 4) {\n front[miel] = 0;\n } else {\n front[miel] = 1;\n }\n } #pragma omp parallel for default(shared) private(miel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel,iel)", "context_chars": 100, "text": "fine = front[nelt-1];\n\n // action[i] records the morton index of the i'th element to be refined\n for (miel = 0; miel < nelt; miel++) {\n iel = mt_to_id_old[miel];\n if (ich[iel] == 4) {\n action[front[miel]-1] = miel;\n }\n } #pragma omp parallel for default(shared) private(miel,iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(miel,iel,ntemp,mielnew)", "context_chars": 100, "text": "elements with index less than\n // iel (actual element index, not morton index), will be refined.\n for (miel = 0; miel < nelt; miel++) {\n iel = mt_to_id_old[miel];\n if (ich[iel] == 4) {\n ntemp = (front[miel]-1)*7;\n mielnew = miel+ntemp;\n } else {\n ntemp = front[miel]*7;\n mielnew = miel+ntemp;\n }\n\n mt_to_id[mielnew] = iel;\n ref_front_id[iel] = nelt+ntemp;\n } #pragma omp parallel for default(shared) private(miel,iel,ntemp,mielnew)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(index,miel,mielnew,iel, \\", "context_chars": 100, "text": " neighboring information.\n\n nelttemp = nelt;\n\n if (num_refine > 0) {\n *ifmortar = true;\n }\n\n nelt,treetemp,xctemp,yctemp,zctemp,cbctemp,sjetemp,ta1temp, \\\n 
ii,jj,ntemp,xleft,xright,xhalf,yleft,yright,yhalf,zleft,zright,\\\n zhalf,ndir,facedir,jface,cb,le,ne,n1,n2,i,j,k)\n for (index = 0; index < num_refine; index++) {\n // miel is old morton index and mielnew is new morton index after refinement.\n miel = action[index];\n mielnew = miel+(front[miel]-1)*7;\n iel = mt_to_id_old[miel];\n nelt = nelttemp+(front[miel]-1)*7;\n // save iel's information in a temporary array\n treetemp = tree[iel];\n for (i = 0; i < 8; i++) {\n xctemp[i] = xc[iel][i];\n yctemp[i] = yc[iel][i];\n zctemp[i] = zc[iel][i];\n }\n for (i = 0; i < 6; i++) {\n cbctemp[i] = cbc[iel][i];\n for (jj = 0; jj < 2; jj++) {\n for (ii = 0; ii < 2; ii++) {\n sjetemp[i][jj][ii] = sje[iel][i][jj][ii];\n }\n }\n }\n copy((double *)ta1temp, ta1[iel][0][0], NXYZ);\n\n // zero out iel here\n tree[iel] = 0;\n nr_init(cbc[iel], 6, 0);\n nr_init(sje[iel][0][0], 24, -1);\n nr_init(ijel[iel][0], 12, -1);\n r_init(ta1[iel][0][0], NXYZ, 0.0);\n\n // initialize new child elements:iel and nelt+1~nelt+7\n for (j = 0; j < 7; j++) {\n mt_to_id[mielnew+j+1] = nelt+j;\n tree[nelt+j] = 0;\n nr_init(cbc[nelt+j], 6, 0);\n nr_init(sje[nelt+j][0][0], 24, -1);\n nr_init(ijel[nelt+j][0], 12, -1);\n r_init(ta1[nelt+j][0][0], NXYZ, 0.0);\n }\n\n // update the tree[]\n ntemp = treetemp << 3;\n tree[iel] = ntemp;\n for (i = 0; i < 7; i++) {\n tree[nelt+i] = ntemp + ((i + 1) % 8);\n }\n // update the children's vertices' coordinates\n xhalf = xctemp[0]+(xctemp[1]-xctemp[0])/2.0;\n xleft = xctemp[0];\n xright = xctemp[1];\n yhalf = yctemp[0]+(yctemp[2]-yctemp[0])/2.0;\n yleft = yctemp[0];\n yright = yctemp[2];\n zhalf = zctemp[0]+(zctemp[4]-zctemp[0])/2.0;\n zleft = zctemp[0];\n zright = zctemp[4];\n\n for (j = 0; j < 7; j += 2) {\n for (i = 0; i < 7; i += 2) {\n xc[nelt+j][i] = xhalf;\n xc[nelt+j][i+1] = xright; \n }\n }\n\n for (j = 1; j < 6; j += 2) {\n for (i = 0; i < 7; i += 2) {\n xc[nelt+j][i] = xleft;\n xc[nelt+j][i+1] = xhalf;\n }\n }\n\n for (i = 0; i < 7; i += 2) {\n xc[iel][i] = xleft;\n xc[iel][i+1] = xhalf;\n }\n\n for (i = 0; i < 2; i++) {\n yc[nelt+0][i] = yleft;\n yc[nelt+3][i] = yleft;\n yc[nelt+4][i] = yleft;\n yc[nelt+0][i+4] = yleft;\n yc[nelt+3][i+4] = yleft;\n yc[nelt+4][i+4] = yleft;\n }\n for (i = 2; i < 4; i++) {\n yc[nelt+0][i] = yhalf;\n yc[nelt+3][i] = yhalf;\n yc[nelt+4][i] = yhalf;\n yc[nelt+0][i+4] = yhalf;\n yc[nelt+3][i+4] = yhalf;\n yc[nelt+4][i+4] = yhalf;\n }\n for (j = 1; j < 3; j++) {\n for (i = 0; i < 2; i++) {\n yc[nelt+j][i] = yhalf;\n yc[nelt+j+4][i] = yhalf;\n yc[nelt+j][i+4] = yhalf;\n yc[nelt+j+4][i+4] = yhalf;\n }\n for (i = 2; i < 4; i++) {\n yc[nelt+j][i] = yright;\n yc[nelt+j+4][i] = yright;\n yc[nelt+j][i+4] = yright;\n yc[nelt+j+4][i+4] = yright;\n }\n }\n\n for (i = 0; i < 2; i++) {\n yc[iel][i] = yleft;\n yc[iel][i+4] = yleft;\n }\n for (i = 2; i < 4; i++) {\n yc[iel][i] = yhalf;\n yc[iel][i+4] = yhalf;\n }\n\n for (j = 0; j < 3; j++) {\n for (i = 0; i < 4; i++) {\n zc[nelt+j][i] = zleft;\n zc[nelt+j][i+4] = zhalf;\n }\n }\n for (j = 3; j < 7; j++) {\n for (i = 0; i < 4; i++) {\n zc[nelt+j][i] = zhalf;\n zc[nelt+j][i+4] = zright;\n }\n }\n for (i = 0; i < 4; i++) {\n zc[iel][i] = zleft;\n zc[iel][i+4] = zhalf;\n }\n\n // update the children's neighbor information\n\n // ndir refers to the x,y,z directions, respectively.\n // facedir refers to the orientation of the face in each direction, \n // e.g. 
ndir=0, facedir=0 refers to face 1,\n // and ndir =0, facedir=1 refers to face 2.\n\n for (ndir = 0; ndir < 3; ndir++) {\n for (facedir = 0; facedir <= 1; facedir++) {\n i = 2*ndir+facedir;\n jface = jjface[i];\n cb = cbctemp[i];\n\n // find the new element indices of the four children on each\n // face of the parent element\n for (k = 0; k < 4; k++) {\n le[k] = le_arr[ndir][facedir][k]+nelt;\n ne[k] = le_arr[ndir][1-facedir][k]+nelt;\n }\n if (facedir == 0) {\n le[0] = iel;\n } else {\n ne[0] = iel;\n }\n // update neighbor information of the four child elements on each \n // face of the parent element\n for (k = 0; k < 4; k++) {\n cbc[le[k]][i] = 2;\n sje[le[k]][i][0][0] = ne[k];\n ijel[le[k]][i][0] = 0;\n ijel[le[k]][i][1] = 0;\n }\n\n // if the face type of the parent element is type 2\n if (cb == 2 ) {\n ntemp = sjetemp[i][0][0];\n\n // if the neighbor ntemp is not marked to be refined\n if (ich[ntemp] != 4) {\n cbc[ntemp][jface] = 3;\n ijel[ntemp][jface][0] = 0;\n ijel[ntemp][jface][1] = 0;\n\n for (k = 0; k < 4; k++) {\n cbc[ne[k]][i] = 1;\n sje[ne[k]][i][0][0] = ntemp;\n if (k == 0) {\n ijel[ne[k]][i][0] = 0;\n ijel[ne[k]][i][1] = 0;\n sje[ntemp][jface][0][0] = ne[k];\n } else if (k == 1) {\n ijel[ne[k]][i][0] = 0;\n ijel[ne[k]][i][1] = 1;\n sje[ntemp][jface][1][0] = ne[k];\n } else if (k == 2) {\n ijel[ne[k]][i][0] = 1;\n ijel[ne[k]][i][1] = 0;\n sje[ntemp][jface][0][1] = ne[k];\n } else if (k == 3) {\n ijel[ne[k]][i][0] = 1;\n ijel[ne[k]][i][1] = 1;\n sje[ntemp][jface][1][1] = ne[k];\n }\n }\n\n // if the neighbor ntemp is also marked to be refined\n } else {\n n1 = ref_front_id[ntemp];\n\n for (k = 0; k < 4; k++) {\n cbc[ne[k]][i] = 2;\n n2 = n1 + le_arr[ndir][facedir][k];\n if (n2 == n1+7) n2 = ntemp;\n sje[ne[k]][i][0][0] = n2;\n ijel[ne[k]][i][0] = 0;\n }\n }\n // if the face type of the parent element is type 3\n } else if (cb == 3) {\n for (k = 0; k < 4; k++) {\n cbc[ne[k]][i] = 2;\n if (k == 0) {\n ntemp = sjetemp[i][0][0];\n } else if (k == 1) {\n ntemp = sjetemp[i][1][0];\n } else if (k == 2) {\n ntemp = sjetemp[i][0][1];\n } else if (k == 3) {\n ntemp = sjetemp[i][1][1];\n }\n ijel[ne[k]][i][0] = 0;\n ijel[ne[k]][i][1] = 0;\n sje[ne[k]][i][0][0] = ntemp;\n cbc[ntemp][jface] = 2;\n sje[ntemp][jface][0][0] = ne[k];\n ijel[ntemp][jface][0] = 0;\n ijel[ntemp][jface][1] = 0;\n }\n\n // if the face type of the parent element is type 0\n } else if (cb == 0) {\n for (k = 0; k < 4; k++) {\n cbc[ne[k]][i] = cb;\n }\n }\n } \n } \n\n // map solution from parent element to children\n remap(ta1[iel], &ta1[ref_front_id[iel]], ta1temp);\n } #pragma omp parallel for default(shared) private(index,miel,mielnew,iel, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,i,iftemp) \\", "context_chars": 100, "text": "rsen(logical *if_coarsen, int neltold)\n{\n logical iftemp;\n int iel, i;\n\n *if_coarsen = false;\n\n shared(if_coarsen)\n for (iel = 0; iel < neltold; iel++) {\n if (!skip[iel]) {\n ich[iel] = 0;\n if (!iftouch(iel)) {\n iftemp = false;\n for (i = 0; i < NSIDES; i++) {\n // if iel has a larger size than its face neighbors, it\n // can not be coarsened\n if (cbc[iel][i] == 3) {\n iftemp = true;\n }\n }\n if(!iftemp) {\n if (!(*if_coarsen)) *if_coarsen = true;\n ich[iel] = 2;\n }\n }\n }\n } #pragma omp parallel for default(shared) private(iel,i,iftemp) \\"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel) shared(if_refine)", "context_chars": 100, "text": "----------------\nstatic void find_refine(logical *if_refine)\n{\n int iel;\n\n *if_refine = false;\n\n for (iel = 0; iel < nelt; iel++) {\n ich[iel] = 0;\n if (iftouch(iel)) {\n if ((xc[iel][1] - xc[iel][0]) > dlmin) {\n if (!(*if_refine)) *if_refine = true;\n ich[iel] = 4;\n }\n }\n } #pragma omp parallel for default(shared) private(iel) shared(if_refine)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/adapt.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,i,jface,ntemp, \\", "context_chars": 100, "text": "eck_refine(logical *ifrepeat)\n{\n int iel, iface, ntemp, nntemp, i, jface;\n\n *ifrepeat = false;\n\n iface,nntemp) shared(ifrepeat)\n for (iel = 0; iel < nelt; iel++) {\n // if iel is marked to be refined\n if (ich[iel] == 4) {\n // check its six faces\n for (i = 0; i < NSIDES; i++) {\n jface = jjface[i];\n ntemp = sje[iel][i][0][0];\n // if one face neighbor is larger in size than iel\n if (cbc[iel][i] == 1) {\n // unmark iel\n ich[iel] = 0;\n // the large size neighbor ntemp is marked to be refined\n if (ich[ntemp] != 4) {\n if (!(*ifrepeat)) *ifrepeat = true;\n ich[ntemp] = 4;\n }\n // check iel's neighbor, neighbored by an edge on face i, which\n // must be a face neighbor of ntemp\n for (iface = 0; iface < NSIDES; iface++) {\n if (iface != i && iface != jface) {\n //if edge neighbors are larger than iel, mark them to be refined\n if (cbc[ntemp][iface] == 2) {\n nntemp = sje[ntemp][iface][0][0];\n // ifcor is to make sure the edge neighbor exist\n if (ich[nntemp] !=4 && ifcor(iel, nntemp, i, iface)) {\n ich[nntemp] = 4;\n }\n }\n }\n }\n //if face neighbor are of the same size of iel, check edge neighbors\n } else if (cbc[iel][i] == 2) {\n for (iface = 0; iface < NSIDES; iface++) {\n if (iface != i && iface != jface) {\n if (cbc[ntemp][iface] == 1) {\n nntemp = sje[ntemp][iface][0][0];\n ich[nntemp] = 4;\n ich[iel] = 0;\n if (!(*ifrepeat)) *ifrepeat = true;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) private(iel,i,jface,ntemp, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/diffuse.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "of (am pm) in the CG algorithm\n // (see the specification)\n r_init_omp((double *)t, ntot, 0.0);\n for (i = 0; i < nmor; i++) {\n umor[i] = 0.0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/diffuse.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ie) ", "context_chars": 100, "text": "imeron) timer_stop(t_transf);\n\n // compute pdiffp which is (A theta pm) in the specification\n for (ie = 0; ie < nelt; ie++) {\n laplacian(pdiffp[ie], pdiff[ie], size_e[ie]);\n } #pragma omp parallel for default(shared) private(ie) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/diffuse.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ie,iside)", "context_chars": 100, "text": "or, (double *)pdiffp);\n if (timeron) timer_stop(t_transfb);\n\n // apply boundary condition\n for (ie = 0; ie < nelt; ie++) {\n for (iside = 0; iside < NSIDES; iside++) {\n if(cbc[ie][iside] == 0) {\n facev(pdiffp[ie], iside, 0.0);\n }\n }\n } #pragma omp parallel for default(shared) private(ie,iside)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/precond.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ie,isize,i,j,k,q) ", "context_chars": 100, "text": " i < LX1; i++) {\n dxtm1_2[j][i] = dxtm1[j][i]*dxtm1[j][i];\n }\n }\n\n rdtime = 1.0/dtime;\n\n for (ie = 0; ie < nelt; ie++) {\n r_init(dpcelm[ie][0][0], NXYZ, 0.0);\n isize = size_e[ie];\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n for (q = 0; q < LX1; q++) {\n dpcelm[ie][k][j][i] = dpcelm[ie][k][j][i] + \n g1m1_s[isize][k][j][q] * dxtm1_2[q][i] +\n g1m1_s[isize][k][q][i] * dxtm1_2[q][j] +\n g1m1_s[isize][q][j][i] * dxtm1_2[q][k];\n }\n dpcelm[ie][k][j][i] = VISC*dpcelm[ie][k][j][i]+\n rdtime*bm1_s[isize][k][j][i];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(ie,isize,i,j,k,q) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/precond.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "rtar points. 
NOTE: dpcmor for \n // nonconforming cases will be corrected in subroutine setpcmo \n for (i = 0; i < nmor; i++) {\n dpcmor[i] = 1.0/dpcmor[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/precond.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(element_size,i,j,p,temp, \\", "context_chars": 100, "text": "; i++) {\n tcpre[j][col] = tcpre[j][col] + qbnew[0][i][j-1]*tmp[i][col];\n }\n }\n }\n\n mtemp,temp1,p0,ii,jj)\n for (element_size = 0; element_size < REFINE_MAX; element_size++) {\n // for conforming cases\n\n // pcmor_c[element_size][j][i] records the intermediate value \n // (preconditioner=1/pcmor_c) of the preconditor on collocation \n // point (i,j) on a conforming face of an element of size \n // element_size.\n\n for (j = 0; j < LX1/2+1; j++) {\n for (i = j; i < LX1/2+1; i++) {\n r_init((double *)p, NXYZ, 0.0);\n p[0][j][i] = 1.0;\n laplacian(temp, p, element_size);\n pcmor_c[element_size][j][i] = temp[0][j][i];\n pcmor_c[element_size][j][LX1-1-i] = temp[0][j][i];\n pcmor_c[element_size][i][j] = temp[0][j][i];\n pcmor_c[element_size][i][LX1-1-j] = temp[0][j][i];\n pcmor_c[element_size][LX1-1-i][j] = temp[0][j][i];\n pcmor_c[element_size][LX1-1-i][LX1-1-j] = temp[0][j][i];\n pcmor_c[element_size][LX1-1-j][i] = temp[0][j][i];\n pcmor_c[element_size][LX1-1-j][LX1-1-i] = temp[0][j][i];\n }\n }\n\n // for nonconforming cases \n\n // nonconforming face interior\n\n // pcmor_nc1[element_size][jj][ii][j][i] records the intermediate \n // preconditioner value on collocation point (i,j) on mortar \n // (ii,jj) on a nonconforming face of an element of size element_\n // size\n for (j = 1; j < LX1; j++) {\n for (i = j; i < LX1; i++) {\n r_init((double *)mtemp, LX1*LX1, 0.0);\n r_init((double *)p, NXYZ, 0.0);\n mtemp[j][i] = 1.0;\n // when i, j=LX1-1, mortar points are duplicated, so mtemp needs\n // to be doubled.\n if (i == (LX1-1)) mtemp[j][i] = mtemp[j][i]*2.0;\n if (j == (LX1-1)) mtemp[j][i] = mtemp[j][i]*2.0;\n transf_nc(mtemp, (double (*)[LX1])p);\n laplacian(temp, p, element_size);\n transfb_nc1(temp1, (double (*)[LX1])temp);\n\n // values at points (i,j) and (j,i) are the same\n pcmor_nc1[element_size][0][0][j][i] = temp1[j][i];\n pcmor_nc1[element_size][0][0][i][j] = temp1[j][i];\n }\n\n // when i, j=LX1-1, mortar points are duplicated. 
so pcmor_nc1 needs\n // to be doubled on those points\n pcmor_nc1[element_size][0][0][j][LX1-1]=\n pcmor_nc1[element_size][0][0][j][LX1-1]*2.0;\n pcmor_nc1[element_size][0][0][LX1-1][j]=\n pcmor_nc1[element_size][0][0][j][LX1-1];\n }\n pcmor_nc1[element_size][0][0][LX1-1][LX1-1]=\n pcmor_nc1[element_size][0][0][LX1-1][LX1-1]*2.0;\n\n // nonconforming edges\n j = 0;\n for (i = 1; i < LX1; i++) {\n r_init((double *)mtemp, LX1*LX1, 0.0);\n r_init((double *)p, NXYZ, 0.0);\n r_init((double *)p0, NXYZ, 0.0);\n mtemp[j][i] = 1.0;\n if (i == (LX1-1)) mtemp[j][i] = 2.0;\n transf_nc(mtemp, (double (*)[LX1])p);\n laplacian(temp, p, element_size);\n transfb_nc1(temp1, (double (*)[LX1])temp);\n pcmor_nc1[element_size][0][0][j][i] = temp1[j][i];\n pcmor_nc1[element_size][0][0][i][j] = temp1[j][i];\n\n for (ii = 0; ii < LX1; ii++) {\n // p0 is for the case that a nonconforming edge is shared by\n // two conforming faces\n p0[0][0][ii] = p[0][0][ii];\n for (jj = 0; jj < LX1; jj++) {\n // now p is for the case that a nonconforming edge is shared\n // by nonconforming faces\n p[jj][0][ii] = p[0][jj][ii];\n }\n }\n\n laplacian(temp, p, element_size);\n transfb_nc2(temp1, (double (*)[LX1])temp); \n\n // pcmor_nc2[element_size][jj][ii][j][i] gives the intermediate\n // preconditioner value on collocation point (i,j) on a \n // nonconforming face of an element with size size_element\n\n pcmor_nc2[element_size][0][0][j][i] = temp1[j][i]*2.0 ;\n pcmor_nc2[element_size][0][0][i][j] =\n pcmor_nc2[element_size][0][0][j][i];\n\n laplacian(temp, p0, element_size);\n transfb_nc0(temp1, temp); \n\n // pcmor_nc0[element_size][jj][ii][j][i] gives the intermediate\n // preconditioner value on collocation point (i,j) on a \n // conforming face of an element, which shares a nonconforming \n // edge with another conforming face\n pcmor_nc0[element_size][0][0][j][i] = temp1[j][i];\n pcmor_nc0[element_size][0][0][i][j] = temp1[j][i];\n }\n pcmor_nc1[element_size][0][0][j][LX1-1] =\n pcmor_nc1[element_size][0][0][j][LX1-1]*2.0;\n pcmor_nc1[element_size][0][0][LX1-1][j] =\n pcmor_nc1[element_size][0][0][j][LX1-1];\n pcmor_nc2[element_size][0][0][j][LX1-1] =\n pcmor_nc2[element_size][0][0][j][LX1-1]*2.0;\n pcmor_nc2[element_size][0][0][LX1-1][j] =\n pcmor_nc2[element_size][0][0][j][LX1-1];\n pcmor_nc0[element_size][0][0][j][LX1-1] =\n pcmor_nc0[element_size][0][0][j][LX1-1]*2.0;\n pcmor_nc0[element_size][0][0][LX1-1][j] =\n pcmor_nc0[element_size][0][0][j][LX1-1];\n\n // symmetrical copy\n for (i = 0; i < LX1-1; i++) {\n pcmor_nc1[element_size][1][0][j][i] =\n pcmor_nc1[element_size][0][0][j][LX1-1-i];\n pcmor_nc0[element_size][1][0][j][i] =\n pcmor_nc0[element_size][0][0][j][LX1-1-i];\n pcmor_nc2[element_size][1][0][j][i] =\n pcmor_nc2[element_size][0][0][j][LX1-1-i];\n }\n\n for (j = 1; j < LX1; j++) {\n for (i = 0; i < LX1-1; i++) {\n pcmor_nc1[element_size][1][0][j][i] =\n pcmor_nc1[element_size][0][0][j][LX1-1-i];\n }\n i = LX1-1;\n pcmor_nc1[element_size][1][0][j][i] =\n pcmor_nc1[element_size][0][0][j][LX1-1-i];\n pcmor_nc0[element_size][1][0][j][i] =\n pcmor_nc0[element_size][0][0][j][LX1-1-i];\n pcmor_nc2[element_size][1][0][j][i] =\n pcmor_nc2[element_size][0][0][j][LX1-1-i];\n } \n\n j = 0;\n i = 0;\n pcmor_nc1[element_size][0][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][i];\n pcmor_nc0[element_size][0][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][i];\n pcmor_nc2[element_size][0][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][i];\n for (j = 1; j < LX1-1; j++) {\n i = 0;\n 
pcmor_nc1[element_size][0][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][i];\n pcmor_nc0[element_size][0][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][i];\n pcmor_nc2[element_size][0][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][i];\n for (i = 1; i < LX1; i++) {\n pcmor_nc1[element_size][0][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][i];\n }\n }\n\n j = LX1-1;\n for (i = 1; i < LX1; i++) {\n pcmor_nc1[element_size][0][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][i];\n pcmor_nc0[element_size][0][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][i];\n pcmor_nc2[element_size][0][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][i];\n }\n\n j = 0;\n i = LX1-1;\n pcmor_nc1[element_size][1][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc0[element_size][1][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc2[element_size][1][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][LX1-1-i];\n\n for (j = 1; j < LX1-1; j++) {\n for (i = 1; i < LX1-1; i++) {\n pcmor_nc1[element_size][1][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][LX1-1-i];\n }\n i = LX1-1;\n pcmor_nc1[element_size][1][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc0[element_size][1][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc2[element_size][1][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][LX1-1-i];\n }\n j = LX1-1;\n for (i = 1; i < LX1-1; i++) {\n pcmor_nc1[element_size][1][1][j][i] =\n pcmor_nc1[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc0[element_size][1][1][j][i] =\n pcmor_nc0[element_size][0][0][LX1-1-j][LX1-1-i];\n pcmor_nc2[element_size][1][1][j][i] =\n pcmor_nc2[element_size][0][0][LX1-1-j][LX1-1-i];\n }\n\n\n // vertices shared by at least one nonconforming face or edge\n\n // Among three edges and three faces sharing a vertex on an element\n // situation 1: only one edge is nonconforming\n // situation 2: two edges are nonconforming\n // situation 3: three edges are nonconforming\n // situation 4: one face is nonconforming \n // situation 5: one face and one edge are nonconforming \n // situation 6: two faces are nonconforming\n // situation 7: three faces are nonconforming\n\n r_init((double *)p0, NXYZ, 0.0);\n p0[0][0][0] = 1.0;\n laplacian(temp, p0, element_size);\n pcmor_cor[element_size][7] = temp[0][0][0];\n\n // situation 1\n r_init((double *)p0, NXYZ, 0.0);\n for (i = 0; i < LX1; i++) {\n p0[0][0][i] = tcpre[0][i];\n }\n laplacian(temp, p0, element_size);\n transfb_cor_e(1, &pcmor_cor[element_size][0], temp);\n\n // situation 2\n r_init((double *)p0, NXYZ, 0.0);\n for (i = 0; i < LX1; i++) {\n p0[0][0][i] = tcpre[0][i];\n p0[0][i][0] = tcpre[0][i];\n }\n laplacian(temp, p0, element_size);\n transfb_cor_e(2, &pcmor_cor[element_size][1], temp);\n\n // situation 3\n r_init((double *)p0, NXYZ, 0.0);\n for (i = 0; i < LX1; i++) {\n p0[0][0][i] = tcpre[0][i];\n p0[0][i][0] = tcpre[0][i];\n p0[i][0][0] = tcpre[0][i];\n }\n laplacian(temp, p0, element_size);\n transfb_cor_e(3, &pcmor_cor[element_size][2], temp);\n\n // situation 4\n r_init((double *)p0, NXYZ, 0.0);\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n p0[0][j][i] = tcpre[j][i];\n }\n }\n laplacian(temp, p0, element_size);\n transfb_cor_f(4, &pcmor_cor[element_size][3], temp);\n\n // situation 5\n r_init((double *)p0, NXYZ, 0.0);\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n p0[0][j][i] = tcpre[j][i];\n }\n }\n for (i = 0; i < LX1; i++) {\n p0[i][0][0] = tcpre[0][i];\n }\n 
laplacian(temp, p0, element_size);\n transfb_cor_f(5, &pcmor_cor[element_size][4], temp);\n\n // situation 6\n r_init((double *)p0, NXYZ, 0.0);\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n p0[0][j][i] = tcpre[j][i];\n p0[j][0][i] = tcpre[j][i];\n }\n }\n laplacian(temp, p0, element_size);\n transfb_cor_f(6, &pcmor_cor[element_size][5], temp);\n\n // situation 7\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n p0[0][j][i] = tcpre[j][i];\n p0[j][0][i] = tcpre[j][i];\n p0[j][i][0] = tcpre[j][i];\n }\n }\n laplacian(temp, p0, element_size);\n transfb_cor_f(7, &pcmor_cor[element_size][6], temp);\n } #pragma omp parallel for default(shared) private(element_size,i,j,p,temp, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/precond.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(iel,iside,sizei, \\", "context_chars": 100, "text": "; i++) {\n edgevis[iel][iside][i] = false;\n } \n } \n } \n } //end parallel\n\n imor,_enum,face2,nb1,nb2,i,j,nn1,nn2) \n for (iel = 0; iel < nelt; iel++) {\n for (iside = 0; iside < NSIDES; iside++) {\n // for nonconforming faces\n if (cbc[iel][iside] == 3) {\n sizei = size_e[iel];\n\n // vertices\n\n // ifpcmor[imor] = true indicates that mortar point imor has \n // been visited\n imor = idmo[iel][iside][0][0][0][0];\n if (!ifpcmor[imor]) {\n // compute the preconditioner on mortar point imor\n pc_corner(imor);\n ifpcmor[imor] = true;\n }\n\n imor = idmo[iel][iside][1][0][0][LX1-1];\n if (!ifpcmor[imor]) {\n pc_corner(imor);\n ifpcmor[imor] = true;\n }\n\n imor = idmo[iel][iside][0][1][LX1-1][0];\n if (!ifpcmor[imor]) {\n pc_corner(imor);\n ifpcmor[imor] = true;\n }\n\n imor = idmo[iel][iside][1][1][LX1-1][LX1-1];\n if (!ifpcmor[imor]) {\n pc_corner(imor);\n ifpcmor[imor] = true;\n }\n\n // edges on nonconforming faces, _enum is local edge number\n for (_enum = 0; _enum < 4; _enum++) {\n // edgevis[iel][iside][_enum]=true indicates that local edge \n // _enum of face iside of iel has been visited\n if (!edgevis[iel][iside][_enum]) {\n edgevis[iel][iside][_enum] = true;\n\n // Examing neighbor element information,\n // calculateing the preconditioner value.\n face2 = f_e_ef[iside][_enum];\n if (cbc[iel][face2] == 2) {\n nb1 = sje[iel][face2][0][0];\n if (cbc[nb1][iside] == 2) {\n\n // Compute the preconditioner on local edge _enum on face\n // iside of element iel, 1 is neighborhood information got\n // by examing neighbors(nb1). 
For detailed meaning of 1, \n // see subroutine com_dpc.\n\n com_dpc(iside, iel, _enum, 1, sizei);\n nb2 = sje[nb1][iside][0][0];\n edgevis[nb2][jjface[face2]][op[e_face2[iside][_enum]]] = true;\n\n } else if (cbc[nb1][iside] == 3) {\n com_dpc(iside, iel, _enum, 2, sizei);\n edgevis[nb1][iside][op[_enum]] = true;\n }\n\n } else if (cbc[iel][face2] == 3) {\n edgevis[iel][face2][e_face2[iside][_enum]] = true;\n nb1 = sje[iel][face2][1][0];\n if (cbc[nb1][iside] == 1) {\n com_dpc(iside, iel, _enum, 3, sizei);\n nb2 = sje[nb1][iside][0][0];\n edgevis[nb2][jjface[iside]][op[_enum]] = true;\n edgevis[nb2][jjface[face2]][op[e_face2[iside][_enum]]] = true;\n } else if (cbc[nb1][iside] == 2) {\n com_dpc(iside, iel, _enum, 4, sizei);\n }\n } else if (cbc[iel][face2] == 0) {\n com_dpc(iside, iel, _enum, 0, sizei);\n }\n }\n }\n\n // mortar element interior (not edge of mortar) \n for (nn1 = 0; nn1 < 2; nn1++) {\n for (nn2 = 0; nn2 < 2; nn2++) {\n for (j = 1; j < LX1-1; j++) {\n for (i = 1; i < LX1-1; i++) {\n imor = idmo[iel][iside][nn2][nn1][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][nn2][nn1][j][i]+\n pcmor_c[sizei+1][j][i]);\n }\n }\n }\n }\n\n // for i,j=LX1-1 there are duplicated mortar points, so \n // pcmor_c needs to be doubled or quadrupled\n i = LX1-1;\n for (j = 1; j < LX1-1; j++) {\n imor = idmo[iel][iside][0][0][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][0][0][j][i]+\n pcmor_c[sizei+1][j][i]*2.0);\n imor = idmo[iel][iside][0][1][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][0][1][j][i]+\n pcmor_c[sizei+1][j][i]*2.0);\n }\n\n j = LX1-1;\n imor = idmo[iel][iside][0][0][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][0][0][j][i]+\n pcmor_c[sizei+1][j][i]*4.0);\n for (i = 1; i < LX1-1; i++) {\n imor = idmo[iel][iside][0][0][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][0][0][j][i]+\n pcmor_c[sizei+1][j][i]*2.0);\n imor = idmo[iel][iside][1][0][j][i];\n dpcmor[imor] = 1.0/(pcmor_nc1[sizei][1][0][j][i]+\n pcmor_c[sizei+1][j][i]*2.0);\n }\n } \n }\n } #pragma omp parallel for default(shared) private(iel,iside,sizei, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/convect.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(rk4,rk3,rk2,temp,rk1,dtx3,\\", "context_chars": 100, "text": "\n yy0[substep] = Y00+VELY*subtime[substep];\n zz0[substep] = Z00+VELZ*subtime[substep];\n }\n\n dtx2,dtx1,iside,ip,sum,src,r2,i,j,k,isize,iel,tempa,xloc,yloc,zloc)\n for (iel = 0; iel < nelt; iel++) {\n isize = size_e[iel];\n /*\n xloc[i] is the location of i'th collocation in x direction in an element.\n yloc[i] is the location of j'th collocation in y direction in an element.\n zloc[i] is the location of k'th collocation in z direction in an element.\n */\n for (i = 0; i < LX1; i++) {\n xloc[i] = xfrac[i]*(xc[iel][1]-xc[iel][0])+xc[iel][0];\n }\n for (j = 0; j < LX1; j++) {\n yloc[j] = xfrac[j]*(yc[iel][3]-yc[iel][0])+yc[iel][0];\n }\n for (k = 0; k < LX1; k++) {\n zloc[k] = xfrac[k]*(zc[iel][4]-zc[iel][0])+zc[iel][0];\n }\n\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n r2 = pow(xloc[i]-xx0[0],2.0)+pow(yloc[j]-yy0[0],2.0)+\n pow(zloc[k]-zz0[0],2.0);\n if (r2 <= alpha2) {\n src = cos(sqrt(r2)*pidivalpha)+1.0;\n } else {\n src = 0.0;\n }\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][i] * ta1[iel][k][j][ip];\n }\n dtx1 = -VELX*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) 
{\n sum = sum + dxm1[ip][j] * ta1[iel][k][ip][i];\n }\n dtx2 = -VELY*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][k] * ta1[iel][ip][j][i];\n }\n dtx3 = -VELZ*sum*xrm1_s[isize][k][j][i];\n\n rk1[k][j][i] = dtx1 + dtx2 + dtx3 + src;\n temp[k][j][i] = ta1[iel][k][j][i]+dtime2*rk1[k][j][i];\n }\n }\n } \n\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n r2 = pow(xloc[i]-xx0[1],2.0) + pow(yloc[j]-yy0[1],2.0) +\n pow(zloc[k]-zz0[1],2.0);\n if (r2 <= alpha2) {\n src = cos(sqrt(r2)*pidivalpha)+1.0;\n } else {\n src = 0.0;\n }\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][i] * temp[k][j][ip];\n }\n dtx1 = -VELX*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][j] * temp[k][ip][i];\n }\n dtx2 = -VELY*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][k] * temp[ip][j][i];\n }\n dtx3 = -VELZ*sum*xrm1_s[isize][k][j][i];\n\n rk2[k][j][i] = dtx1 + dtx2 + dtx3 + src;\n tempa[k][j][i] = ta1[iel][k][j][i]+dtime2*rk2[k][j][i];\n }\n }\n } \n\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n r2 = pow(xloc[i]-xx0[1],2.0) + pow(yloc[j]-yy0[1],2.0) +\n pow(zloc[k]-zz0[1],2.0);\n if (r2 <= alpha2) {\n src = cos(sqrt(r2)*pidivalpha)+1.0;\n } else {\n src = 0.0;\n }\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][i] * tempa[k][j][ip];\n }\n dtx1 = -VELX*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][j] * tempa[k][ip][i];\n }\n dtx2 = -VELY*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][k] * tempa[ip][j][i];\n }\n dtx3 = -VELZ*sum*xrm1_s[isize][k][j][i];\n\n rk3[k][j][i] = dtx1 + dtx2 + dtx3 + src;\n temp[k][j][i] = ta1[iel][k][j][i]+dtime*rk3[k][j][i];\n }\n }\n } \n\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n r2 = pow(xloc[i]-xx0[2],2.0) + pow(yloc[j]-yy0[2],2.0) +\n pow(zloc[k]-zz0[2],2.0);\n if (r2 <= alpha2) {\n src = cos(sqrt(r2)*pidivalpha)+1.0;\n } else {\n src = 0.0;\n }\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][i] * temp[k][j][ip];\n }\n dtx1 = -VELX*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][j] * temp[k][ip][i];\n }\n dtx2 = -VELY*sum*xrm1_s[isize][k][j][i];\n sum = 0.0;\n for (ip = 0; ip < LX1; ip++) {\n sum = sum + dxm1[ip][k] * temp[ip][j][i];\n }\n dtx3 = -VELZ*sum*xrm1_s[isize][k][j][i];\n\n rk4[k][j][i] = dtx1 + dtx2 + dtx3 + src;\n tempa[k][j][i] = sixth*(rk1[k][j][i]+2.0*\n rk2[k][j][i]+2.0*rk3[k][j][i]+rk4[k][j][i]);\n }\n }\n } \n\n // apply boundary condition\n for (iside = 0; iside < NSIDES; iside++) {\n if(cbc[iel][iside] == 0) {\n facev(tempa, iside, 0.0);\n }\n }\n\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n trhs[iel][k][j][i]=bm1_s[isize][k][j][i]*(ta1[iel][k][j][i]*rdtime+\n tempa[k][j][i]);\n ta1[iel][k][j][i]=ta1[iel][k][j][i]+tempa[k][j][i]*dtime;\n }\n }\n }\n } #pragma omp parallel for default(shared) private(rk4,rk3,rk2,temp,rk1,dtx3,\\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/convect.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "ble *)ta1);\n } else 
{\n transfb_c((double *)ta1);\n }\n if (timeron) timer_stop(t_transfb_c);\n\n for (i = 0; i < nmor; i++) {\n tmort[i] = tmort[i] / mormult[i];\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/ua.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ie,iside)", "context_chars": 100, "text": " condition: zero out the residual on domain boundaries\n\n // apply boundary conidtion to trhs\n for (ie = 0; ie < nelt; ie++) {\n for (iside = 0; iside < NSIDES; iside++) {\n if (cbc[ie][iside] == 0) {\n facev(trhs[ie], iside, 0.0);\n }\n }\n } #pragma omp parallel for default(shared) private(ie,iside)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/setup.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(isize,temp,temp1,temp2, \\", "context_chars": 100, "text": "isize, i, j, k, ntemp, iel;\n\n for (i = 0; i < LX1; i++) {\n xfrac[i] = zgm1[i]*0.5 + 0.5;\n }\n\n k,j,i,dtemp)\n for (isize = 0; isize < REFINE_MAX; isize++) {\n temp = pow(2.0, (-isize-2));\n dtemp = 1.0/temp;\n temp1 = temp*temp*temp;\n temp2 = temp*temp;\n for (k = 0; k < LX1; k++) {\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n xrm1_s[isize][k][j][i] = dtemp;\n jacm1_s[isize][k][j][i] = temp1;\n rxm1_s[isize][k][j][i] = temp2;\n g1m1_s[isize][k][j][i] = w3m1[k][j][i]*temp;\n bm1_s[isize][k][j][i] = w3m1[k][j][i]*temp1;\n g4m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i]/wxm1[i];\n g5m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i]/wxm1[j];\n g6m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i]/wxm1[k];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(isize,temp,temp1,temp2, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/setup.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(ntemp,i,j,iel)", "context_chars": 100, "text": ";\n g6m1_s[isize][k][j][i] = g1m1_s[isize][k][j][i]/wxm1[k];\n }\n }\n }\n }\n\n for (iel = 0; iel < LELT; iel++) {\n ntemp = LX1*LX1*LX1*iel;\n for (j = 0; j < LX1; j++) {\n for (i = 0; i < LX1; i++) {\n idel[iel][0][j][i] = ntemp+i*LX1 + j*LX1*LX1+LX1 - 1;\n idel[iel][1][j][i] = ntemp+i*LX1 + j*LX1*LX1;\n idel[iel][2][j][i] = ntemp+i*1 + j*LX1*LX1+LX1*(LX1-1);\n idel[iel][3][j][i] = ntemp+i*1 + j*LX1*LX1;\n idel[iel][4][j][i] = ntemp+i*1 + j*LX1+LX1*LX1*(LX1-1);\n idel[iel][5][j][i] = ntemp+i*1 + j*LX1;\n }\n }\n } #pragma omp parallel for default(shared) private(ntemp,i,j,iel)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/transfer.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "---------------------------------\nvoid init_locks()\n{\n int i;\n\n // initialize locks in parallel\n for (i = 0; i < LMOR; i++) {\n omp_init_lock(&tlock[i]);\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/UA/transfer.c", "omp_pragma_line": "#pragma omp parallel for default(shared) 
private(il,j,ig,i,col,ije2,ije1, \\", "context_chars": 100, "text": "2, col, i, j, ig, il;\n\n // zero out tx on element boundaries\n col2(tx, (double *)tmult, ntot);\n\n ig4,ig3,ig2,ig1,nnje,il4,il3,il2,il1,iface,ie,tmp)\n for (ie = 0; ie < nelt; ie++) {\n for (iface = 0; iface < NSIDES; iface++) {\n // get the collocation point index of the four local corners on the\n // face iface of element ie\n il1 = idel[ie][iface][0][0];\n il2 = idel[ie][iface][0][LX1-1];\n il3 = idel[ie][iface][LX1-1][0];\n il4 = idel[ie][iface][LX1-1][LX1-1];\n\n // get the mortar indices of the four local corners\n ig1 = idmo[ie][iface][0][0][0][0];\n ig2 = idmo[ie][iface][1][0][0][LX1-1];\n ig3 = idmo[ie][iface][0][1][LX1-1][0];\n ig4 = idmo[ie][iface][1][1][LX1-1][LX1-1];\n\n // copy the value from tmor to tx for these four local corners\n tx[il1] = tmor[ig1];\n tx[il2] = tmor[ig2];\n tx[il3] = tmor[ig3];\n tx[il4] = tmor[ig4];\n\n // nnje=1 for conforming faces, nnje=2 for nonconforming faces\n if (cbc[ie][iface] == 3) {\n nnje = 2;\n } else {\n nnje = 1;\n }\n\n // for nonconforming faces\n if (nnje == 2) {\n // nonconforming faces have four pieces of mortar, first map them to\n // two intermediate mortars, stored in tmp\n r_init((double *)tmp, LX1*LX1*2, 0.0);\n\n for (ije1 = 0; ije1 < nnje; ije1++) {\n for (ije2 = 0; ije2 < nnje; ije2++) {\n for (col = 0; col < LX1; col++) {\n // in each row col, when coloumn i=1 or LX1, the value\n // in tmor is copied to tmp\n i = v_end[ije2];\n ig = idmo[ie][iface][ije2][ije1][col][i];\n tmp[ije1][col][i] = tmor[ig];\n\n // in each row col, value in the interior three collocation\n // points is computed by apply mapping matrix qbnew to tmor\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][col][i];\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije2][ije1][col][j];\n tmp[ije1][col][i] = tmp[ije1][col][i] +\n qbnew[ije2][j][i-1]*tmor[ig];\n }\n }\n }\n }\n }\n\n // mapping from two pieces of intermediate mortar tmp to element\n // face tx\n for (ije1 = 0; ije1 < nnje; ije1++) {\n // the first column, col=0, is an edge of face iface.\n // the value on the three interior collocation points, tx, is\n // computed by applying mapping matrices qbnew to tmp.\n // the mapping result is divided by 2, because there will be\n // duplicated contribution from another face sharing this edge.\n col = 0;\n for (i = 1; i < LX1-1; i++) {\n il= idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*\n tmp[ije1][j][col]*0.5;\n }\n }\n\n // for column 1 ~ lx-2\n for (col = 1; col < LX1-1; col++) {\n //when i=0 or LX1-1, the collocation points are also on an edge of\n // the face, so the mapping result also needs to be divided by 2\n i = v_end[ije1];\n il = idel[ie][iface][i][col];\n tx[il] = tx[il]+tmp[ije1][i][col]*0.5;\n\n // compute the value at interior collocation points in\n // columns 1 ~ LX1-1\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]* tmp[ije1][j][col];\n }\n }\n }\n\n // same as col=0\n col = LX1-1;\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][col];\n for (j = 0; j < LX1; j++) {\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*\n tmp[ije1][j][col]*0.5;\n }\n }\n }\n\n // for conforming faces\n } else {\n // face interior\n for (col = 1; col < LX1-1; col++) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][col][i];\n ig = idmo[ie][iface][0][0][col][i];\n tx[il] = tmor[ig];\n }\n }\n\n // edges of conforming faces\n\n // if local 
edge 0 is a nonconforming edge\n if (idmo[ie][iface][0][0][0][LX1-1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][0][i];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije1][0][0][j];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 0 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][0][i];\n ig = idmo[ie][iface][0][0][0][i];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 1 is a nonconforming edge\n if (idmo[ie][iface][1][0][1][LX1-1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][LX1-1];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][1][ije1][j][LX1-1];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 1 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][LX1-1];\n ig = idmo[ie][iface][0][0][i][LX1-1];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 2 is a nonconforming edge\n if (idmo[ie][iface][0][1][LX1-1][1] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][LX1-1][i];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][ije1][1][LX1-1][j];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n\n // if local edge 2 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][LX1-1][i];\n ig = idmo[ie][iface][0][0][LX1-1][i];\n tx[il] = tmor[ig];\n }\n }\n\n // if local edge 3 is a nonconforming edge\n if (idmo[ie][iface][0][0][LX1-1][0] != -1) {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][0];\n for (ije1 = 0; ije1 < 2; ije1++) {\n for (j = 0; j < LX1; j++) {\n ig = idmo[ie][iface][0][ije1][j][0];\n tx[il] = tx[il] + qbnew[ije1][j][i-1]*tmor[ig]*0.5;\n }\n }\n }\n // if local edge 3 is a conforming edge\n } else {\n for (i = 1; i < LX1-1; i++) {\n il = idel[ie][iface][i][0];\n ig = idmo[ie][iface][0][0][i][0];\n tx[il] = tmor[ig];\n }\n }\n }\n }\n } #pragma omp parallel for default(shared) private(il,j,ig,i,col,ije2,ije1, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/pinvr.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "nvr()\n{\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\n if (timeron) timer_start(t_pinvr);\n for (k = 1; k <= nz2; k++) {\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n r1 = rhs[k][j][i][0];\n r2 = rhs[k][j][i][1];\n r3 = rhs[k][j][i][2];\n r4 = rhs[k][j][i][3];\n r5 = rhs[k][j][i][4];\n\n t1 = bt * r1;\n t2 = 0.5 * ( r4 + r5 );\n\n rhs[k][j][i][0] = bt * ( r4 - r5 );\n rhs[k][j][i][1] = -r3;\n rhs[k][j][i][2] = r2;\n rhs[k][j][i][3] = -t1 + t2;\n rhs[k][j][i][4] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/x_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,i1,i2,m, \\", "context_chars": 100, "text": "ve()\n{\n int i, j, k, i1, i2, m;\n double ru1, fac1, fac2;\n\n if (timeron) timer_start(t_xsolve);\n ru1,fac1,fac2)\n for (k = 1; k <= nz2; k++) {\n lhsinit(nx2+1, ny2);\n\n 
//---------------------------------------------------------------------\n // Computes the left hand side for the three x-factors \n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // first fill the lhs for the u-eigenvalue \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n ru1 = c3c4*rho_i[k][j][i];\n cv[i] = us[k][j][i];\n rhon[i] = max(max(dx2+con43*ru1,dx5+c1c5*ru1), max(dxmax+ru1,dx1));\n }\n\n for (i = 1; i <= nx2; i++) {\n lhs[j][i][0] = 0.0;\n lhs[j][i][1] = -dttx2 * cv[i-1] - dttx1 * rhon[i-1];\n lhs[j][i][2] = 1.0 + c2dttx1 * rhon[i];\n lhs[j][i][3] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];\n lhs[j][i][4] = 0.0;\n }\n }\n\n //---------------------------------------------------------------------\n // add fourth order dissipation \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n i = 1;\n lhs[j][i][2] = lhs[j][i][2] + comz5;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n lhs[j][i][4] = lhs[j][i][4] + comz1;\n\n lhs[j][i+1][1] = lhs[j][i+1][1] - comz4;\n lhs[j][i+1][2] = lhs[j][i+1][2] + comz6;\n lhs[j][i+1][3] = lhs[j][i+1][3] - comz4;\n lhs[j][i+1][4] = lhs[j][i+1][4] + comz1;\n }\n\n for (j = 1; j <= ny2; j++) {\n for (i = 3; i <= grid_points[0]-4; i++) {\n lhs[j][i][0] = lhs[j][i][0] + comz1;\n lhs[j][i][1] = lhs[j][i][1] - comz4;\n lhs[j][i][2] = lhs[j][i][2] + comz6;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n lhs[j][i][4] = lhs[j][i][4] + comz1;\n }\n }\n\n for (j = 1; j <= ny2; j++) {\n i = grid_points[0]-3;\n lhs[j][i][0] = lhs[j][i][0] + comz1;\n lhs[j][i][1] = lhs[j][i][1] - comz4;\n lhs[j][i][2] = lhs[j][i][2] + comz6;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n\n lhs[j][i+1][0] = lhs[j][i+1][0] + comz1;\n lhs[j][i+1][1] = lhs[j][i+1][1] - comz4;\n lhs[j][i+1][2] = lhs[j][i+1][2] + comz5;\n }\n\n //---------------------------------------------------------------------\n // subsequently, fill the other factors (u+c), (u-c) by adding to \n // the first \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n lhsp[j][i][0] = lhs[j][i][0];\n lhsp[j][i][1] = lhs[j][i][1] - dttx2 * speed[k][j][i-1];\n lhsp[j][i][2] = lhs[j][i][2];\n lhsp[j][i][3] = lhs[j][i][3] + dttx2 * speed[k][j][i+1];\n lhsp[j][i][4] = lhs[j][i][4];\n lhsm[j][i][0] = lhs[j][i][0];\n lhsm[j][i][1] = lhs[j][i][1] + dttx2 * speed[k][j][i-1];\n lhsm[j][i][2] = lhs[j][i][2];\n lhsm[j][i][3] = lhs[j][i][3] - dttx2 * speed[k][j][i+1];\n lhsm[j][i][4] = lhs[j][i][4];\n }\n }\n\n //---------------------------------------------------------------------\n // FORWARD ELIMINATION \n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // perform the Thomas algorithm; first, FORWARD ELIMINATION \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n fac1 = 1.0/lhs[j][i][2];\n lhs[j][i][3] = fac1*lhs[j][i][3];\n lhs[j][i][4] = fac1*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1]*lhs[j][i][3];\n lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i1][m] 
= rhs[k][j][i1][m] - lhs[j][i1][1]*rhs[k][j][i][m];\n }\n lhs[j][i2][1] = lhs[j][i2][1] - lhs[j][i2][0]*lhs[j][i][3];\n lhs[j][i2][2] = lhs[j][i2][2] - lhs[j][i2][0]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhs[j][i2][0]*rhs[k][j][i][m];\n }\n }\n }\n\n //---------------------------------------------------------------------\n // The last two rows in this grid block are a bit different, \n // since they for (not have two more rows available for the\n // elimination of off-diagonal entries\n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n fac1 = 1.0/lhs[j][i][2];\n lhs[j][i][3] = fac1*lhs[j][i][3];\n lhs[j][i][4] = fac1*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1]*lhs[j][i][3];\n lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1]*rhs[k][j][i][m];\n }\n\n //---------------------------------------------------------------------\n // scale the last row immediately \n //---------------------------------------------------------------------\n fac2 = 1.0/lhs[j][i1][2];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i1][m] = fac2*rhs[k][j][i1][m];\n }\n }\n\n //---------------------------------------------------------------------\n // for (the u+c and the u-c factors \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n\n m = 3;\n fac1 = 1.0/lhsp[j][i][2];\n lhsp[j][i][3] = fac1*lhsp[j][i][3];\n lhsp[j][i][4] = fac1*lhsp[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1]*lhsp[j][i][3];\n lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1]*lhsp[j][i][4];\n rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1]*rhs[k][j][i][m];\n lhsp[j][i2][1] = lhsp[j][i2][1] - lhsp[j][i2][0]*lhsp[j][i][3];\n lhsp[j][i2][2] = lhsp[j][i2][2] - lhsp[j][i2][0]*lhsp[j][i][4];\n rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsp[j][i2][0]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[j][i][2];\n lhsm[j][i][3] = fac1*lhsm[j][i][3];\n lhsm[j][i][4] = fac1*lhsm[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1]*lhsm[j][i][3];\n lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1]*lhsm[j][i][4];\n rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1]*rhs[k][j][i][m];\n lhsm[j][i2][1] = lhsm[j][i2][1] - lhsm[j][i2][0]*lhsm[j][i][3];\n lhsm[j][i2][2] = lhsm[j][i2][2] - lhsm[j][i2][0]*lhsm[j][i][4];\n rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsm[j][i2][0]*rhs[k][j][i][m];\n }\n }\n\n //---------------------------------------------------------------------\n // And again the last two rows separately\n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n\n m = 3;\n fac1 = 1.0/lhsp[j][i][2];\n lhsp[j][i][3] = fac1*lhsp[j][i][3];\n lhsp[j][i][4] = fac1*lhsp[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1]*lhsp[j][i][3];\n lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1]*lhsp[j][i][4];\n rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[j][i][2];\n lhsm[j][i][3] = fac1*lhsm[j][i][3];\n lhsm[j][i][4] = 
fac1*lhsm[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1]*lhsm[j][i][3];\n lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1]*lhsm[j][i][4];\n rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1]*rhs[k][j][i][m];\n\n //---------------------------------------------------------------------\n // Scale the last row immediately\n //---------------------------------------------------------------------\n rhs[k][j][i1][3] = rhs[k][j][i1][3]/lhsp[j][i1][2];\n rhs[k][j][i1][4] = rhs[k][j][i1][4]/lhsm[j][i1][2];\n }\n\n //---------------------------------------------------------------------\n // BACKSUBSTITUTION \n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3]*rhs[k][j][i1][m];\n }\n\n rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3]*rhs[k][j][i1][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3]*rhs[k][j][i1][4];\n }\n\n //---------------------------------------------------------------------\n // The first three factors\n //---------------------------------------------------------------------\n for (j = 1; j <= ny2; j++) {\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - \n lhs[j][i][3]*rhs[k][j][i1][m] -\n lhs[j][i][4]*rhs[k][j][i2][m];\n }\n\n //-------------------------------------------------------------------\n // And the remaining two\n //-------------------------------------------------------------------\n rhs[k][j][i][3] = rhs[k][j][i][3] - \n lhsp[j][i][3]*rhs[k][j][i1][3] -\n lhsp[j][i][4]*rhs[k][j][i2][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - \n lhsm[j][i][3]*rhs[k][j][i1][4] -\n lhsm[j][i][4]*rhs[k][j][i2][4];\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,i1,i2,m, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/add.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,m)", "context_chars": 100, "text": "------------------------------\nvoid add()\n{\n int i, j, k, m;\n\n if (timeron) timer_start(t_add);\n for (k = 1; k <= nz2; k++) {\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n for (m = 0; m < 5; m++) {\n u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];\n }\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/txinvr.c", "omp_pragma_line": "#pragma omp parallel for default(shared) \\", "context_chars": 100, "text": ", t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv;\n\n if (timeron) timer_start(t_txinvr);\n private(i,j,k,t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv)\n for (k = 1; k <= nz2; k++) {\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n ru1 = rho_i[k][j][i];\n uu = us[k][j][i];\n vv = vs[k][j][i];\n ww = ws[k][j][i];\n ac = speed[k][j][i];\n ac2inv = ac*ac;\n\n r1 = rhs[k][j][i][0];\n r2 = rhs[k][j][i][1];\n r3 = rhs[k][j][i][2];\n r4 = rhs[k][j][i][3];\n r5 = rhs[k][j][i][4];\n\n t1 = c2 / ac2inv * ( qs[k][j][i]*r1 - uu*r2 - vv*r3 - ww*r4 + r5 );\n t2 = bt * ru1 * ( uu * r1 - r2 );\n t3 = ( bt * ru1 * ac ) * t1;\n\n rhs[k][j][i][0] = r1 - t1;\n 
rhs[k][j][i][1] = - ru1 * ( ww*r1 - r4 );\n rhs[k][j][i][2] = ru1 * ( vv*r1 - r3 );\n rhs[k][j][i][3] = - t2 + t3;\n rhs[k][j][i][4] = t2 + t3;\n }\n }\n } #pragma omp parallel for default(shared) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/ninvr.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "nvr()\n{\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\n if (timeron) timer_start(t_ninvr);\n for (k = 1; k <= nz2; k++) {\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n r1 = rhs[k][j][i][0];\n r2 = rhs[k][j][i][1];\n r3 = rhs[k][j][i][2];\n r4 = rhs[k][j][i][3];\n r5 = rhs[k][j][i][4];\n\n t1 = bt * r3;\n t2 = 0.5 * ( r4 + r5 );\n\n rhs[k][j][i][0] = -r2;\n rhs[k][j][i][1] = r1;\n rhs[k][j][i][2] = bt * ( r4 - r5 );\n rhs[k][j][i][3] = -t1 + t2;\n rhs[k][j][i][4] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/z_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,k1,k2,m, \\", "context_chars": 100, "text": "----------------------------------------------------------\n\n if (timeron) timer_start(t_zsolve);\n ru1,fac1,fac2)\n for (j = 1; j <= ny2; j++) {\n lhsinitj(nz2+1, nx2);\n\n //---------------------------------------------------------------------\n // Computes the left hand side for the three z-factors \n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // first fill the lhs for the u-eigenvalue \n //---------------------------------------------------------------------\n for (i = 1; i <= nx2; i++) {\n for (k = 0; k <= nz2+1; k++) {\n ru1 = c3c4*rho_i[k][j][i];\n cv[k] = ws[k][j][i];\n rhos[k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1));\n }\n\n for (k = 1; k <= nz2; k++) {\n lhs[k][i][0] = 0.0;\n lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];\n lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k];\n lhs[k][i][3] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];\n lhs[k][i][4] = 0.0;\n }\n }\n\n //---------------------------------------------------------------------\n // add fourth order dissipation \n //---------------------------------------------------------------------\n for (i = 1; i <= nx2; i++) {\n k = 1;\n lhs[k][i][2] = lhs[k][i][2] + comz5;\n lhs[k][i][3] = lhs[k][i][3] - comz4;\n lhs[k][i][4] = lhs[k][i][4] + comz1;\n\n k = 2;\n lhs[k][i][1] = lhs[k][i][1] - comz4;\n lhs[k][i][2] = lhs[k][i][2] + comz6;\n lhs[k][i][3] = lhs[k][i][3] - comz4;\n lhs[k][i][4] = lhs[k][i][4] + comz1;\n }\n\n for (k = 3; k <= nz2-2; k++) {\n for (i = 1; i <= nx2; i++) {\n lhs[k][i][0] = lhs[k][i][0] + comz1;\n lhs[k][i][1] = lhs[k][i][1] - comz4;\n lhs[k][i][2] = lhs[k][i][2] + comz6;\n lhs[k][i][3] = lhs[k][i][3] - comz4;\n lhs[k][i][4] = lhs[k][i][4] + comz1;\n }\n }\n\n for (i = 1; i <= nx2; i++) {\n k = nz2-1;\n lhs[k][i][0] = lhs[k][i][0] + comz1;\n lhs[k][i][1] = lhs[k][i][1] - comz4;\n lhs[k][i][2] = lhs[k][i][2] + comz6;\n lhs[k][i][3] = lhs[k][i][3] - comz4;\n\n k = nz2;\n lhs[k][i][0] = lhs[k][i][0] + comz1;\n lhs[k][i][1] = lhs[k][i][1] - comz4;\n lhs[k][i][2] = lhs[k][i][2] + comz5;\n }\n\n 
//---------------------------------------------------------------------\n // subsequently, fill the other factors (u+c), (u-c) \n //---------------------------------------------------------------------\n for (k = 1; k <= nz2; k++) {\n for (i = 1; i <= nx2; i++) {\n lhsp[k][i][0] = lhs[k][i][0];\n lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k-1][j][i];\n lhsp[k][i][2] = lhs[k][i][2];\n lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k+1][j][i];\n lhsp[k][i][4] = lhs[k][i][4];\n lhsm[k][i][0] = lhs[k][i][0];\n lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k-1][j][i];\n lhsm[k][i][2] = lhs[k][i][2];\n lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k+1][j][i];\n lhsm[k][i][4] = lhs[k][i][4];\n }\n }\n\n\n //---------------------------------------------------------------------\n // FORWARD ELIMINATION \n //---------------------------------------------------------------------\n for (k = 0; k <= grid_points[2]-3; k++) {\n k1 = k + 1;\n k2 = k + 2;\n for (i = 1; i <= nx2; i++) {\n fac1 = 1.0/lhs[k][i][2];\n lhs[k][i][3] = fac1*lhs[k][i][3];\n lhs[k][i][4] = fac1*lhs[k][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];\n lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];\n }\n lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0]*lhs[k][i][3];\n lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0]*lhs[k][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0]*rhs[k][j][i][m];\n }\n }\n }\n\n //---------------------------------------------------------------------\n // The last two rows in this grid block are a bit different, \n // since they for (not have two more rows available for the\n // elimination of off-diagonal entries\n //---------------------------------------------------------------------\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= nx2; i++) {\n fac1 = 1.0/lhs[k][i][2];\n lhs[k][i][3] = fac1*lhs[k][i][3];\n lhs[k][i][4] = fac1*lhs[k][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];\n lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];\n }\n\n //---------------------------------------------------------------------\n // scale the last row immediately\n //---------------------------------------------------------------------\n fac2 = 1.0/lhs[k1][i][2];\n for (m = 0; m < 3; m++) {\n rhs[k1][j][i][m] = fac2*rhs[k1][j][i][m];\n }\n }\n\n //---------------------------------------------------------------------\n // for (the u+c and the u-c factors \n //---------------------------------------------------------------------\n for (k = 0; k <= grid_points[2]-3; k++) {\n k1 = k + 1;\n k2 = k + 2;\n for (i = 1; i <= nx2; i++) {\n m = 3;\n fac1 = 1.0/lhsp[k][i][2];\n lhsp[k][i][3] = fac1*lhsp[k][i][3];\n lhsp[k][i][4] = fac1*lhsp[k][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];\n lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];\n lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0]*lhsp[k][i][3];\n lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0]*lhsp[k][i][4];\n rhs[k2][j][i][m] = rhs[k2][j][i][m] - 
lhsp[k2][i][0]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[k][i][2];\n lhsm[k][i][3] = fac1*lhsm[k][i][3];\n lhsm[k][i][4] = fac1*lhsm[k][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];\n lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];\n lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0]*lhsm[k][i][3];\n lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0]*lhsm[k][i][4];\n rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0]*rhs[k][j][i][m];\n }\n }\n\n //---------------------------------------------------------------------\n // And again the last two rows separately\n //---------------------------------------------------------------------\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= nx2; i++) {\n m = 3;\n fac1 = 1.0/lhsp[k][i][2];\n lhsp[k][i][3] = fac1*lhsp[k][i][3];\n lhsp[k][i][4] = fac1*lhsp[k][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];\n lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[k][i][2];\n lhsm[k][i][3] = fac1*lhsm[k][i][3];\n lhsm[k][i][4] = fac1*lhsm[k][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];\n lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];\n rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];\n\n //---------------------------------------------------------------------\n // Scale the last row immediately (some of this is overkill\n // if this is the last cell)\n //---------------------------------------------------------------------\n rhs[k1][j][i][3] = rhs[k1][j][i][3]/lhsp[k1][i][2];\n rhs[k1][j][i][4] = rhs[k1][j][i][4]/lhsm[k1][i][2];\n }\n\n\n //---------------------------------------------------------------------\n // BACKSUBSTITUTION \n //---------------------------------------------------------------------\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= nx2; i++) {\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3]*rhs[k1][j][i][m];\n }\n\n rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4];\n }\n\n //---------------------------------------------------------------------\n // Whether or not this is the last processor, we always have\n // to complete the back-substitution \n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // The first three factors\n //---------------------------------------------------------------------\n for (k = grid_points[2]-3; k >= 0; k--) {\n k1 = k + 1;\n k2 = k + 2;\n for (i = 1; i <= nx2; i++) {\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - \n lhs[k][i][3]*rhs[k1][j][i][m] -\n lhs[k][i][4]*rhs[k2][j][i][m];\n }\n\n //-------------------------------------------------------------------\n // And the remaining two\n //-------------------------------------------------------------------\n rhs[k][j][i][3] = rhs[k][j][i][3] - \n lhsp[k][i][3]*rhs[k1][j][i][3] -\n lhsp[k][i][4]*rhs[k2][j][i][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - \n lhsm[k][i][3]*rhs[k1][j][i][4] -\n lhsm[k][i][4]*rhs[k2][j][i][4];\n }\n }\n } #pragma omp parallel for 
default(shared) private(i,j,k,k1,k2,m, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/tzetar.c", "omp_pragma_line": "#pragma omp parallel for default(shared) \\", "context_chars": 100, "text": "vel, zvel, r1, r2, r3, r4, r5;\n double btuz, ac2u, uzik1;\n\n if (timeron) timer_start(t_tzetar);\n private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)\n for (k = 1; k <= nz2; k++) {\n for (j = 1; j <= ny2; j++) {\n for (i = 1; i <= nx2; i++) {\n xvel = us[k][j][i];\n yvel = vs[k][j][i];\n zvel = ws[k][j][i];\n ac = speed[k][j][i];\n\n ac2u = ac*ac;\n\n r1 = rhs[k][j][i][0];\n r2 = rhs[k][j][i][1];\n r3 = rhs[k][j][i][2];\n r4 = rhs[k][j][i][3];\n r5 = rhs[k][j][i][4]; \n\n uzik1 = u[k][j][i][0];\n btuz = bt * uzik1;\n\n t1 = btuz/ac * (r4 + r5);\n t2 = r3 + t1;\n t3 = btuz * (r4 - r5);\n\n rhs[k][j][i][0] = t2;\n rhs[k][j][i][1] = -uzik1*r2 + xvel*t2;\n rhs[k][j][i][2] = uzik1*r1 + yvel*t2;\n rhs[k][j][i][3] = zvel*t2 + t3;\n rhs[k][j][i][4] = uzik1*(-xvel*r2 + yvel*r1) + \n qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3;\n }\n }\n } #pragma omp parallel for default(shared) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/SP/y_solve.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,j1,j2,m, \\", "context_chars": 100, "text": "ve()\n{\n int i, j, k, j1, j2, m;\n double ru1, fac1, fac2;\n\n if (timeron) timer_start(t_ysolve);\n ru1,fac1,fac2)\n for (k = 1; k <= nz2; k++) {\n lhsinitj(ny2+1, nx2);\n\n //---------------------------------------------------------------------\n // Computes the left hand side for the three y-factors \n //---------------------------------------------------------------------\n\n //---------------------------------------------------------------------\n // first fill the lhs for the u-eigenvalue \n //---------------------------------------------------------------------\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n ru1 = c3c4*rho_i[k][j][i];\n cv[j] = vs[k][j][i];\n rhoq[j] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1));\n }\n\n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[j][i][0] = 0.0;\n lhs[j][i][1] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];\n lhs[j][i][2] = 1.0 + c2dtty1 * rhoq[j];\n lhs[j][i][3] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];\n lhs[j][i][4] = 0.0;\n }\n }\n\n //---------------------------------------------------------------------\n // add fourth order dissipation \n //---------------------------------------------------------------------\n for (i = 1; i <= grid_points[0]-2; i++) {\n j = 1;\n lhs[j][i][2] = lhs[j][i][2] + comz5;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n lhs[j][i][4] = lhs[j][i][4] + comz1;\n\n lhs[j+1][i][1] = lhs[j+1][i][1] - comz4;\n lhs[j+1][i][2] = lhs[j+1][i][2] + comz6;\n lhs[j+1][i][3] = lhs[j+1][i][3] - comz4;\n lhs[j+1][i][4] = lhs[j+1][i][4] + comz1;\n }\n\n for (j = 3; j <= grid_points[1]-4; j++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n lhs[j][i][0] = lhs[j][i][0] + comz1;\n lhs[j][i][1] = lhs[j][i][1] - comz4;\n lhs[j][i][2] = lhs[j][i][2] + comz6;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n lhs[j][i][4] = lhs[j][i][4] + comz1;\n }\n }\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n j = grid_points[1]-3;\n lhs[j][i][0] = lhs[j][i][0] + comz1;\n lhs[j][i][1] = lhs[j][i][1] - comz4;\n 
lhs[j][i][2] = lhs[j][i][2] + comz6;\n lhs[j][i][3] = lhs[j][i][3] - comz4;\n\n lhs[j+1][i][0] = lhs[j+1][i][0] + comz1;\n lhs[j+1][i][1] = lhs[j+1][i][1] - comz4;\n lhs[j+1][i][2] = lhs[j+1][i][2] + comz5;\n }\n\n //---------------------------------------------------------------------\n // subsequently, for (the other two factors \n //---------------------------------------------------------------------\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n lhsp[j][i][0] = lhs[j][i][0];\n lhsp[j][i][1] = lhs[j][i][1] - dtty2 * speed[k][j-1][i];\n lhsp[j][i][2] = lhs[j][i][2];\n lhsp[j][i][3] = lhs[j][i][3] + dtty2 * speed[k][j+1][i];\n lhsp[j][i][4] = lhs[j][i][4];\n lhsm[j][i][0] = lhs[j][i][0];\n lhsm[j][i][1] = lhs[j][i][1] + dtty2 * speed[k][j-1][i];\n lhsm[j][i][2] = lhs[j][i][2];\n lhsm[j][i][3] = lhs[j][i][3] - dtty2 * speed[k][j+1][i];\n lhsm[j][i][4] = lhs[j][i][4];\n }\n }\n\n\n //---------------------------------------------------------------------\n // FORWARD ELIMINATION \n //---------------------------------------------------------------------\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n fac1 = 1.0/lhs[j][i][2];\n lhs[j][i][3] = fac1*lhs[j][i][3];\n lhs[j][i][4] = fac1*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1]*lhs[j][i][3];\n lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1]*rhs[k][j][i][m];\n }\n lhs[j2][i][1] = lhs[j2][i][1] - lhs[j2][i][0]*lhs[j][i][3];\n lhs[j2][i][2] = lhs[j2][i][2] - lhs[j2][i][0]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhs[j2][i][0]*rhs[k][j][i][m];\n }\n }\n }\n\n //---------------------------------------------------------------------\n // The last two rows in this grid block are a bit different, \n // since they for (not have two more rows available for the\n // elimination of off-diagonal entries\n //---------------------------------------------------------------------\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n fac1 = 1.0/lhs[j][i][2];\n lhs[j][i][3] = fac1*lhs[j][i][3];\n lhs[j][i][4] = fac1*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n }\n lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1]*lhs[j][i][3];\n lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1]*lhs[j][i][4];\n for (m = 0; m < 3; m++) {\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1]*rhs[k][j][i][m];\n }\n //---------------------------------------------------------------------\n // scale the last row immediately \n //---------------------------------------------------------------------\n fac2 = 1.0/lhs[j1][i][2];\n for (m = 0; m < 3; m++) {\n rhs[k][j1][i][m] = fac2*rhs[k][j1][i][m];\n }\n }\n\n //---------------------------------------------------------------------\n // for (the u+c and the u-c factors \n //---------------------------------------------------------------------\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n m = 3;\n fac1 = 1.0/lhsp[j][i][2];\n lhsp[j][i][3] = fac1*lhsp[j][i][3];\n lhsp[j][i][4] = fac1*lhsp[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1]*lhsp[j][i][3];\n lhsp[j1][i][3] = lhsp[j1][i][3] - 
lhsp[j1][i][1]*lhsp[j][i][4];\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1]*rhs[k][j][i][m];\n lhsp[j2][i][1] = lhsp[j2][i][1] - lhsp[j2][i][0]*lhsp[j][i][3];\n lhsp[j2][i][2] = lhsp[j2][i][2] - lhsp[j2][i][0]*lhsp[j][i][4];\n rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsp[j2][i][0]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[j][i][2];\n lhsm[j][i][3] = fac1*lhsm[j][i][3];\n lhsm[j][i][4] = fac1*lhsm[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1]*lhsm[j][i][3];\n lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1]*lhsm[j][i][4];\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1]*rhs[k][j][i][m];\n lhsm[j2][i][1] = lhsm[j2][i][1] - lhsm[j2][i][0]*lhsm[j][i][3];\n lhsm[j2][i][2] = lhsm[j2][i][2] - lhsm[j2][i][0]*lhsm[j][i][4];\n rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsm[j2][i][0]*rhs[k][j][i][m];\n }\n }\n\n //---------------------------------------------------------------------\n // And again the last two rows separately\n //---------------------------------------------------------------------\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n m = 3;\n fac1 = 1.0/lhsp[j][i][2];\n lhsp[j][i][3] = fac1*lhsp[j][i][3];\n lhsp[j][i][4] = fac1*lhsp[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1]*lhsp[j][i][3];\n lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1]*lhsp[j][i][4];\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1]*rhs[k][j][i][m];\n\n m = 4;\n fac1 = 1.0/lhsm[j][i][2];\n lhsm[j][i][3] = fac1*lhsm[j][i][3];\n lhsm[j][i][4] = fac1*lhsm[j][i][4];\n rhs[k][j][i][m] = fac1*rhs[k][j][i][m];\n lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1]*lhsm[j][i][3];\n lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1]*lhsm[j][i][4];\n rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1]*rhs[k][j][i][m];\n\n //---------------------------------------------------------------------\n // Scale the last row immediately \n //---------------------------------------------------------------------\n rhs[k][j1][i][3] = rhs[k][j1][i][3]/lhsp[j1][i][2];\n rhs[k][j1][i][4] = rhs[k][j1][i][4]/lhsm[j1][i][2];\n }\n\n\n //---------------------------------------------------------------------\n // BACKSUBSTITUTION \n //---------------------------------------------------------------------\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3]*rhs[k][j1][i][m];\n }\n\n rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3]*rhs[k][j1][i][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3]*rhs[k][j1][i][4];\n }\n\n //---------------------------------------------------------------------\n // The first three factors\n //---------------------------------------------------------------------\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (m = 0; m < 3; m++) {\n rhs[k][j][i][m] = rhs[k][j][i][m] - \n lhs[j][i][3]*rhs[k][j1][i][m] -\n lhs[j][i][4]*rhs[k][j2][i][m];\n }\n\n //-------------------------------------------------------------------\n // And the remaining two\n //-------------------------------------------------------------------\n rhs[k][j][i][3] = rhs[k][j][i][3] - \n lhsp[j][i][3]*rhs[k][j1][i][3] -\n lhsp[j][i][4]*rhs[k][j2][i][3];\n rhs[k][j][i][4] = rhs[k][j][i][4] - \n lhsm[j][i][3]*rhs[k][j1][i][4] -\n lhsm[j][i][4]*rhs[k][j2][i][4];\n }\n }\n } #pragma omp parallel 
for default(shared) private(i,j,k,j1,j2,m, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)", "context_chars": 100, "text": " (*)[n2][n1])ou;\n\n int i3, i2, i1;\n\n double r1[M], r2[M];\n\n if (timeron) timer_start(T_psinv);\n for (i3 = 1; i3 < n3-1; i3++) {\n for (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n }\n for (i1 = 1; i1 < n1-1; i1++) {\n u[i3][i2][i1] = u[i3][i2][i1]\n + c[0] * r[i3][i2][i1]\n + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n + r1[i1] )\n + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n //--------------------------------------------------------------------\n // Assume c[3] = 0 (Enable line below if c[3] not= 0)\n //--------------------------------------------------------------------\n // + c[3] * ( r2[i1-1] + r2[i1+1] )\n //--------------------------------------------------------------------\n }\n }\n } #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)", "context_chars": 100, "text": "e (*)[n2][n1])or;\n\n int i3, i2, i1;\n double u1[M], u2[M];\n\n if (timeron) timer_start(T_resid);\n for (i3 = 1; i3 < n3-1; i3++) {\n for (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n }\n for (i1 = 1; i1 < n1-1; i1++) {\n r[i3][i2][i1] = v[i3][i2][i1]\n - a[0] * u[i3][i2][i1]\n //-------------------------------------------------------------------\n // Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0)\n //-------------------------------------------------------------------\n // - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1]\n // + u1[i1] )\n //-------------------------------------------------------------------\n - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )\n - a[3] * ( u2[i1-1] + u2[i1+1] );\n }\n }\n } #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) \\", "context_chars": 100, "text": " d2 = 2;\n } else {\n d2 = 1;\n }\n\n if (m3k == 3) {\n d3 = 2;\n } else {\n d3 = 1;\n }\n\n private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)\n for (j3 = 1; j3 < m3j-1; j3++) {\n i3 = 2*j3-d3;\n for (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n\n for (j1 = 1; j1 < m1j; j1++) {\n i1 = 2*j1-d1;\n x1[i1] = r[i3+1][i2 ][i1] + r[i3+1][i2+2][i1]\n + r[i3 ][i2+1][i1] + r[i3+2][i2+1][i1];\n y1[i1] = r[i3 ][i2 ][i1] + r[i3+2][i2 ][i1]\n + r[i3 ][i2+2][i1] + r[i3+2][i2+2][i1];\n }\n\n for (j1 = 1; j1 < m1j-1; j1++) {\n i1 = 2*j1-d1;\n y2 = r[i3 ][i2 ][i1+1] + r[i3+2][i2 ][i1+1]\n + r[i3 ][i2+2][i1+1] + r[i3+2][i2+2][i1+1];\n x2 = r[i3+1][i2 ][i1+1] + r[i3+1][i2+2][i1+1]\n + r[i3 ][i2+1][i1+1] + 
r[i3+2][i2+1][i1+1];\n s[j3][j2][j1] =\n 0.5 * r[i3+1][i2+1][i1+1]\n + 0.25 * (r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)\n + 0.125 * (x1[i1] + x1[i1+2] + y2)\n + 0.0625 * (y1[i1] + y1[i1+2]);\n }\n }\n } #pragma omp parallel for default(shared) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)", "context_chars": 100, "text": "M], z2[M], z3[M];\n\n if (timeron) timer_start(T_interp);\n if (n1 != 3 && n2 != 3 && n3 != 3) {\n for (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n for (i1 = 0; i1 < mm1; i1++) {\n z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n }\n\n for (i1 = 0; i1 < mm1-1; i1++) {\n u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]\n + z[i3][i2][i1];\n u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);\n }\n for (i1 = 0; i1 < mm1-1; i1++) {\n u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n + 0.5 * z1[i1];\n u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n + 0.25 * (z1[i1] + z1[i1+1]);\n }\n for (i1 = 0; i1 < mm1-1; i1++) {\n u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n + 0.5 * z2[i1];\n u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n + 0.25 * (z2[i1] + z2[i1+1]);\n }\n for (i1 = 0; i1 < mm1-1; i1++) {\n u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n + 0.25 * z3[i1];\n u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n + 0.125 * (z3[i1] + z3[i1+1]);\n }\n }\n } #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \\", "context_chars": 100, "text": "-------\n // fill array\n //---------------------------------------------------------------------\n shared(e2,e3,d1,a1)\n for (i3 = 1; i3 < e3; i3++) {\n x1 = starts[i3];\n for (i2 = 1; i2 < e2; i2++) {\n xx = x1;\n vranlc(d1, &xx, a, &(z[i3][i2][1]));\n rdummy = randlc(&x1, a1);\n }\n } #pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3)", "context_chars": 100, "text": " i >= 0; i--) {\n printf(\" %4d\", jg[0][i][1]);\n if (++cnt % 10 == 0) printf(\"\\n\");\n }\n */\n\n for (i3 = 0; i3 < n3; i3++) {\n for (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n z[i3][i2][i1] = 0.0;\n }\n }\n } #pragma omp parallel for default(shared) private(i1,i2,i3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3)", "context_chars": 100, "text": "oz, int n1, int n2, int n3)\n{\n double (*z)[n2][n1] = (double (*)[n2][n1])oz;\n\n int i1, i2, i3;\n\n for (i3 = 0; i3 < n3; i3++) {\n for (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n z[i3][i2][i1] = 0.0;\n }\n }\n } #pragma omp parallel for 
default(shared) private(i1,i2,i3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/IS/is.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "; i++) {\n bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS);\n }\n\n for( i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,k1) schedule(static,1)", "context_chars": 100, "text": "ETS\n\n /* Buckets are already sorted. Sorting keys within each bucket */\n#ifdef SCHED_CYCLIC\n #else\n #pragma omp parallel for private(i,j,k,k1) schedule(dynamic)\n\n for( j=0; j< NUM_BUCKETS; j++ ) {\n\n k1 = (j > 0)? bucket_ptrs[j-1] : 0;\n for ( i = k1; i < bucket_ptrs[j]; i++ ) {\n k = --key_buff_ptr_global[key_buff2[i]];\n key_array[k] = key_buff2[i];\n }\n } #pragma omp parallel for private(i,j,k,k1) schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)", "context_chars": 100, "text": " */\n#ifdef SCHED_CYCLIC\n #pragma omp parallel for private(i,j,k,k1) schedule(static,1)\n#else\n for( j=0; j< NUM_BUCKETS; j++ ) {\n\n k1 = (j > 0)? bucket_ptrs[j-1] : 0;\n for ( i = k1; i < bucket_ptrs[j]; i++ ) {\n k = --key_buff_ptr_global[key_buff2[i]];\n key_array[k] = key_buff2[i];\n }\n } #pragma omp parallel for private(i,j,k,k1) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenACCUserGroup/openacc-users-group/Contributed_Sample_Codes/NAS_SHOC_OpenACC_2.5/NPB-OMP-C/IS/is.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:j)", "context_chars": 100, "text": "endif\n\n\n/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */\n\n j = 0;\n for( i=1; i key_array[i] )\n j++;\n\n if( j != 0 )\n printf( \"Full_verify: number of keys out of sort: %ld\\n\", (long)j );\n else\n passed_verification++;\n\n}\n\n\n\n\n/*****************************************************************/\n/************* R A N K ****************/\n/*****************************************************************/\n\n\nvoid rank( int iteration )\n{\n\n INT_TYPE i, k;\n INT_TYPE *key_buff_ptr, *key_buff_ptr2;\n\n#ifdef USE_BUCKETS\n int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;\n INT_TYPE num_bucket_keys = (1L << shift);\n\n\n\n key_array[iteration] = iteration;\n key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;\n\n\n/* Determine where the partial verify test keys are, load into */\n/* top of array bucket_size */\n for( i=0; i> shift]++;\n\n/* Accumulative bucket sizes are the bucket pointers.\n These are global sizes accumulated upon to each bucket */\n bucket_ptrs[0] = 0;\n for( k=0; k< myid; k++ ) \n bucket_ptrs[0] += bucket_size[k][0];\n\n for( i=1; i< NUM_BUCKETS; i++ ) { \n bucket_ptrs[i] = bucket_ptrs[i-1];\n for( k=0; k< myid; k++ )\n bucket_ptrs[i] += bucket_size[k][i];\n for( k=myid; k< num_procs; k++ )\n bucket_ptrs[i] += bucket_size[k][i-1];\n }\n\n\n/* Sort into appropriate bucket */\n #pragma omp for schedule(static)\n for( i=0; i> shift]++] = k;\n }\n\n/* The bucket pointers now point to the final accumulated 
sizes */\n if (myid < num_procs-1) {\n for( i=0; i< NUM_BUCKETS; i++ )\n for( k=myid+1; k< num_procs; k++ )\n bucket_ptrs[i] += bucket_size[k][i];\n }\n\n\n/* Now, buckets are sorted. We only need to sort keys inside\n each bucket, which can be done in parallel. Because the distribution\n of the number of keys in the buckets is Gaussian, the use of\n a dynamic schedule should improve load balance, thus, performance */\n\n#ifdef SCHED_CYCLIC\n #pragma omp for schedule(static,1)\n#else\n #pragma omp for schedule(dynamic)\n\n for( i=0; i< NUM_BUCKETS; i++ ) {\n\n/* Clear the work array section associated with each bucket */\n k1 = i * num_bucket_keys;\n k2 = k1 + num_bucket_keys;\n for ( k = k1; k < k2; k++ )\n key_buff_ptr[k] = 0;\n\n/* Ranking of all keys occurs in this section: */\n\n/* In this section, the keys themselves are used as their \n own indexes to determine how many of each there are: their\n individual population */\n m = (i > 0)? bucket_ptrs[i-1] : 0;\n for ( k = m; k < bucket_ptrs[i]; k++ )\n key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */\n /* population */\n\n/* To obtain ranks of each key, successively add the individual key\n population, not forgetting to add m, the total of lesser keys,\n to the first key population */\n key_buff_ptr[k1] += m;\n for ( k = k1+1; k < k2; k++ )\n key_buff_ptr[k] += key_buff_ptr[k-1];\n\n }\n\n#else /*USE_BUCKETS*/\n\n\n work_buff = key_buff1_aptr[myid];\n\n\n/* Clear the work array */\n for( i=0; i #pragma omp parallel for reduction(+:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/TriangleCounting/ptc.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,4096) default(none) \\", "context_chars": 100, "text": "oc(nvtxs, \"iperm\"); /* iperm[new-vtx-num] => old-vtx-num */\n\n /* Determine maxdegree/csrange */\n shared(nvtxs, xadj) \\\n reduction(max: maxdegree)\n for (vi=0; vi=dstart; di--) { \n counts = gcounts + (nthreads-1)*csrange;\n for (ti=nthreads-1; ti>=0; ti--) {\n psum -= counts[di];\n counts[di] = psum;\n counts -= csrange;\n }\n }\n #pragma omp barrier\n\n /* Create the perm/iperm arrays and the nxadj array of the re-ordered graph */\n counts = gcounts + mytid*csrange;\n\n /* TODO: This can be optimized by pre-sorting the per-thread vertices according \n to their degree and processing them in increasing degree order */\n for (vi=vistart; vi=vistart; vi--) { \n psum -= nxadj[vi];\n nxadj[vi] = psum;\n }\n #pragma omp barrier\n \n /* Compute the chunk-based partitioning of the work for the reordered/sorted graph */\n chunksize = 1+psums[nthreads-1]/(100*nthreads);\n for (nchunks=0, psum=0, vi=vistart; vi= chunksize) {\n nchunks++;\n psum = 0;\n }\n }\n psums[mytid] = nchunks+1;\n\n #pragma omp barrier\n #pragma omp single\n for (ti=1; ti=vistart; vi--) {\n if ((psum += nxadj[vi+1]-nxadj[vi]) >= chunksize) {\n chunkptr[--nchunks] = vi;\n psum = 0;\n }\n }\n if (mytid == 0)\n chunkptr[0] = 0;\n #pragma omp barrier\n\n nchunks = psums[nthreads-1]; /* this is the total # of chunks */\n /*\n #pragma omp single\n {\n for (vi=0; vi=0; ci--) {\n for (vi=chunkptr[ci]; vi 1)\n gk_i32sorti(nedges, buffer); /* sort adjncy list */\n }\n }\n\n } #pragma omp parallel for schedule(static,4096) default(none) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/TriangleCounting/ptc.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1024) \\", "context_chars": 100, "text": "lt->timer_tc);\n\n /* populate uxadj[] and determine 
the size of the hash-map */\n startv = nvtxs;\n default(none) \\\n shared(nvtxs, xadj, adjncy, uxadj) \\\n private(vj, ei, ej) \\\n reduction(max: maxhmsize) \\\n reduction(min: startv)\n for (vi=nvtxs-1; vi>=0; vi--) {\n for (ei=xadj[vi+1]-1; adjncy[ei]>vi; ei--); \n uxadj[vi] = ei;\n maxhmsize = gk_max(maxhmsize, (int32_t)(xadj[vi+1]-uxadj[vi]));\n startv = (uxadj[vi] != xadj[vi] ? vi : startv);\n\n /* flip the order of Adj(vi)'s upper triangular adjacency list */\n for (ej=xadj[vi+1]-1; ei #pragma omp parallel for schedule(dynamic,1024) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(max: max_val)", "context_chars": 100, "text": "t const * const restrict array,\n int64_t N)\n{\n assert(N > 0);\n\n int32_t max_val = array[0];\n\n for(int64_t i=0; i < N; ++i) {\n max_val = gk_max(max_val, array[i]);\n } #pragma omp parallel for schedule(static) reduction(max: max_val)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(max: degree)", "context_chars": 100, "text": "n max_val;\n}\n\n\nssize_t graph_max_degree(\n gk_graph_t const * const G)\n{\n ssize_t degree = 0;\n\n for(int32_t v=0; v < G->nvtxs; ++v) {\n degree = gk_max(degree, G->xadj[v+1] - G->xadj[v]);\n } #pragma omp parallel for schedule(static) reduction(max: degree)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "T->adjncy\");\n\n /* get counts for xadj */\n par_memset(T->xadj, 0, (nvtxs+1) * sizeof(*T->xadj));\n for(int64_t e=0; e < nedges; ++e) {\n #pragma omp atomic\n T->xadj[G->adjncy[e]]++;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "32_t v=0; v < nvtxs; ++v) {\n xadj[1+v] = graph->xadj[v];\n }\n\n /* shift adjncy values by 1 */\n for(ssize_t e=0; e < nedges; ++e) {\n ++(graph->adjncy[e]);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+:nnz)", "context_chars": 100, "text": "int64_t count_nnz(\n int64_t const N,\n int32_t const * const supports)\n{\n int64_t nnz = 0;\n\n for(int64_t e = 0; e < N; ++e) {\n if(supports[e] > 0) {\n ++nnz;\n }\n } #pragma omp parallel for schedule(static) reduction(+:nnz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/util.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+:total_support)", "context_chars": 100, "text": "support(\n int64_t const N,\n int32_t const * const supports)\n{\n int64_t total_support = 0;\n\n for(int64_t e = 0; e < N; ++e) {\n if(supports[e] > 0) {\n total_support += supports[e];\n }\n } #pragma omp parallel for schedule(static) reduction(+:total_support)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/kt_sbucket.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "st));\n ++big_slist; /* +1 to allow slist[-1] to be 
valid */\n\n /* allocate each thread-bucket */\n for(int bucket=0; bucket < nbuckets; ++bucket) {\n support_bucket_t * sbucket = &(sbuckets[bucket]);\n sbucket->nsupports = nsupports;\n sbucket->nowned_edges = 0;\n sbucket->slist = big_slist;\n sbucket->list_head =\n gk_malloc(sbucket->nsupports * sizeof(*sbucket->list_head), \"list_head\");\n ssize_t * const shead = sbucket->list_head;\n for(int32_t s=0; s < sbucket->nsupports; ++s) {\n shead[s] = -1;\n }\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)", "context_chars": 100, "text": ">ktmax = ktmax;\n vault->ktedges = gk_malloc(xadj[nvtxs] * sizeof(*vault->ktedges), \"ktedges\");\n\n for(int32_t v=0; v < nvtxs; ++v) {\n for(ssize_t e = xadj[v]; e < xadj[v+1]; ++e) {\n /* -1 to revert the shift-by-one ordering */\n int32_t const v1 = vault->iperm[v];\n int32_t const v2 = vault->iperm[adjncy[e]];\n\n vault->ktedges[e].vi = gk_min(v1, v2);\n vault->ktedges[e].vj = gk_max(v1, v2);\n\n vault->ktedges[e].k = supports[e];\n }\n } #pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)", "context_chars": 100, "text": "to 'u' vertices in ugraph. */\n int32_t * u_vtxs = gk_malloc(nedges * sizeof(*u_vtxs), \"u_vtxs\");\n for(int32_t v=0; v < nvtxs; ++v) {\n for(ssize_t e = vault->ugraph->xadj[v]; e < vault->ugraph->xadj[v+1]; ++e) {\n u_vtxs[e] = v;\n }\n } #pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)", "context_chars": 100, "text": "graph */\n int64_t * lgraph_maps = gk_malloc(nedges * sizeof(*lgraph_maps),\n \"lgraph_maps\");\n for(int32_t v=0; v < nvtxs; ++v) {\n for(ssize_t le = vault->lgraph->xadj[v]; le < vault->lgraph->xadj[v+1];\n ++le) {\n int32_t const u = vault->lgraph->adjncy[le];\n /* find corresponding edge in ugraph */\n for(ssize_t ue = vault->ugraph->xadj[u]; ue < vault->ugraph->xadj[u+1];\n ++ue) {\n if(vault->ugraph->adjncy[ue] == v) {\n lgraph_maps[le] = ue;\n }\n }\n }\n } #pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "gk_malloc(nedges * sizeof(*need_update),\n \"need_update_new\");\n\n /* reset update messages */\n for(int64_t e=0; e < nedges; ++e) {\n need_update[e] = 1;\n need_update_new[e] = 0;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK) \\", "context_chars": 100, "text": "g);\n bool done = false;\n /*\n * Main loop.\n */\n while(!done) {\n int64_t nchanges = 0;\n\n reduction(+: nchanges)\n for(int64_t e=0; e < nedges; ++e) {\n if(!need_update[e]) {\n continue;\n }\n\n int const tid = omp_get_thread_num();\n int32_t const new_support = p_update_edge(vault->lgraph, vault->ugraph,\n lgraph_maps, supports, triangle_buf[tid], h_index[tid], h_index_buf[tid],\n e, u_vtxs[e], 
need_update_new);\n\n if(supports[e] != new_support) {\n /* benign race condition as long as writes to int32_t are atomic */\n supports[e] = new_support;\n ++nchanges;\n }\n\n /* reset update */\n need_update[e] = 0;\n } #pragma omp parallel for schedule(dynamic, DYNAMIC_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/and.c", "omp_pragma_line": "#pragma omp parallel for reduction(max: max_ktruss)", "context_chars": 100, "text": "mp;\n } /* end main loop */\n gk_stopwctimer(vault->timer_ktpeeling);\n\n\n int32_t max_ktruss = 0;\n for(int64_t e=0; e < nedges; ++e) {\n /* +2 because of the k-truss definition... */\n supports[e] += 2;\n max_ktruss = gk_max(max_ktruss, supports[e]);\n } #pragma omp parallel for reduction(max: max_ktruss)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/kt.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+:total_sup)", "context_chars": 100, "text": " shead[k] = ti;\n slist[ti].peid = -1;\n\n\n /* add up sups[:] */\n int64_t total_sup = 0;\n for(int64_t e = 0; e < nedges; ++e) {\n if(sups[e] >= 0) {\n total_sup += sups[e];\n }\n } #pragma omp parallel for schedule(static) reduction(+:total_sup)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/kt.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction(+:total_sup)", "context_chars": 100, "text": "goto BACK;\n }\n\n gk_stopwctimer(timer_currk);\n\n /* add up sups[:] */\n total_sup = 0;\n for(int64_t e = 0; e < nedges; ++e) {\n if(sups[e] >= 0) {\n total_sup += sups[e];\n }\n } #pragma omp parallel for schedule(static) reduction(+:total_sup)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/multistage-peeling.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "2_t * const restrict supports = dgraph->supports;\n\n /* initialize the start of each adj. 
list */\n for(int32_t vi=0; vi < nvtxs; ++vi) {\n dgraph->xaii[vi].start = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/KarypisLab/K-Truss/src/multistage-peeling.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "s after peeling: %\"PRId64\"\\n\", ntriangles);\n\n /* adjust for */\n if(params->outfile != NULL) {\n for(int64_t e=0; e < nedges; ++e) {\n dgraph->supports[e] += 2;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/kaserashi/Parallelization-of-Smith-Waterman-Algorithm/parallelcode.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ray[i][0]=0;\n\t}\n\n\tcompval = 0;\n//parallelization of for loop to minimizethe time if done serially \n\tfor(i = 1; i <= lenA; ++i)\n\t{\t\n\t #pragma omp parallel for \n\t for(j = 1; j <= lenB; ++j)\n\t {\t\n\t\t\tif(FASTA1[i-1] == FASTA2[j-1])\n\t\t\t{\t\t\t\t\n\t\t\t\tcompval = (SWArray[i-1][j-1] + Match);\t\n\n\t\t\t}\n\t\t\tif(compval < ((SWArray[i-1][j]) + Gap))\n\t\t\t{\t \n\t\t\t\tcompval = ((SWArray[i-1][j]) + Gap);\t\t\n\t\t\t}\n\t\t\t\n\n\t\t\tif(compval<(SWArray[i][j-1]+Gap))\n\t\t\t{\t\n\t\t\t compval=((SWArray[i][j-1])+Gap); \n\t\t\t}\n\t\t\n\n\t\t\tif(compval < 0)\n\t\t\t{\n\t\t\t\tcompval = 0;\n\t\t\t}\n\n\t\t\t\n\n\t\t\tif(FASTA1[i-1] != FASTA2[j-1])\n\t\t\t{\t\t\t\t\t\n\n\t\t\t\tif(compval < (SWArray[i-1][j-1] + MissMatch))\n\t\t\t\t{\t\n\t\t\t\t\tcompval = SWArray[i-1][j-1] + MissMatch;\n \t\t}\n\n\t\t\t\tif(compval < ((SWArray[i-1][j]) + Gap))\n\t\t\t\t{\t \n\n\t\t\t\t\tcompval = ((SWArray[i-1][j]) + Gap);\t\n\t\t\t\t}\n\t\n\n\t\t\t\tif(compval < ((SWArray[i][j-1]) + Gap))\n\t\t\t\t{\t\n\n\t\t\t\t\tcompval = ((SWArray[i][j-1]) + Gap); \n\t\t\t\t}\n\t\t\t\n\n\t\t\t\tif(compval < 0)\n\t\t\t\t{\n\t\t\t\t\tcompval = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\tSWArray[i][j] = compval;\n\t\t\tcompval = 0;\n\t\t}\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/kaserashi/Parallelization-of-Smith-Waterman-Algorithm/parallelcode.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " minimizethe time if done serially \n\t#pragma omp parallel for \n\tfor(i = 1; i <= lenA; ++i)\n\t{\t\n\t for(j = 1; j <= lenB; ++j)\n\t {\t\n\t\t\tif(FASTA1[i-1] == FASTA2[j-1])\n\t\t\t{\t\t\t\t\n\t\t\t\tcompval = (SWArray[i-1][j-1] + Match);\t\n\n\t\t\t}\n\t\t\tif(compval < ((SWArray[i-1][j]) + Gap))\n\t\t\t{\t \n\t\t\t\tcompval = ((SWArray[i-1][j]) + Gap);\t\t\n\t\t\t}\n\t\t\t\n\n\t\t\tif(compval<(SWArray[i][j-1]+Gap))\n\t\t\t{\t\n\t\t\t compval=((SWArray[i][j-1])+Gap); \n\t\t\t}\n\t\t\n\n\t\t\tif(compval < 0)\n\t\t\t{\n\t\t\t\tcompval = 0;\n\t\t\t}\n\n\t\t\t\n\n\t\t\tif(FASTA1[i-1] != FASTA2[j-1])\n\t\t\t{\t\t\t\t\t\n\n\t\t\t\tif(compval < (SWArray[i-1][j-1] + MissMatch))\n\t\t\t\t{\t\n\t\t\t\t\tcompval = SWArray[i-1][j-1] + MissMatch;\n \t\t}\n\n\t\t\t\tif(compval < ((SWArray[i-1][j]) + Gap))\n\t\t\t\t{\t \n\n\t\t\t\t\tcompval = ((SWArray[i-1][j]) + Gap);\t\n\t\t\t\t}\n\t\n\n\t\t\t\tif(compval < ((SWArray[i][j-1]) + Gap))\n\t\t\t\t{\t\n\n\t\t\t\t\tcompval = ((SWArray[i][j-1]) + Gap); \n\t\t\t\t}\n\t\t\t\n\n\t\t\t\tif(compval < 0)\n\t\t\t\t{\n\t\t\t\t\tcompval = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\tSWArray[i][j] = compval;\n\t\t\tcompval = 0;\n\t\t} #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rohinarora/OpenMP/Dense_MatMul_OpenMP/M-M_blocked_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k, kk, jj, sum) shared(a, b, c, en) default(none) collapse(3) schedule(dynamic, 4)", "context_chars": 100, "text": "is is the only portion of the code you should modify to improve performance. */\n //double x = 0;\n{\n for (kk = 0; kk < en; kk += B)\n for (jj = 0; jj < en; jj += B)\n for (i = 0; i < M; i++)\n for (j = jj; j < jj + B; j++)\n {\n sum = c[i][j];\n for (k = kk; k < kk + B; k=k+4)\n {\n //sum += a[i][k] * b[k][j];\n sum += a[i][k+0] * b[k+0][j];\n sum += a[i][k+1] * b[k+1][j];\n sum += a[i][k+2] * b[k+2][j];\n sum += a[i][k+3] * b[k+3][j];\n }\n c[i][j] = sum;\n }\n } #pragma omp parallel for private(i, j, k, kk, jj, sum) shared(a, b, c, en) default(none) collapse(3) schedule(dynamic, 4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rohinarora/OpenMP/Dense_MatMul_OpenMP/M-M_omp.c", "omp_pragma_line": "#pragma omp parallel for private(x, i, j, k)", "context_chars": 100, "text": "This is the only portion of the code you should modify to improve performance. */\n double x = 0;\n{\n for (i = 0; i < M; i++)\n {\n for (j = 0; j < M; j++)\n {\n x = 0;\n for (k = 0; k < M; k++)\n {\n x += a[i][k] * b[k][j];\n }\n c[i][j] = x;\n }\n }\n } #pragma omp parallel for private(x, i, j, k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rohinarora/OpenMP/Code/src/3_mandel_fix.c", "omp_pragma_line": "#pragma omp parallel for default(shared) firstprivate(eps) private(c, i, j)", "context_chars": 100, "text": "ains the Mandelbrot set,\n // testing each point to see whether it is inside or outside the set.\n\nfor (i = 0; i < NPOINTS; i++)\n {\n for (j = 0; j < NPOINTS; j++)\n {\n c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;\n c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;\n testpoint(c);\n }\n } #pragma omp parallel for default(shared) firstprivate(eps) private(c, i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rohinarora/OpenMP/Code/src/2_parallel_pi_v4_2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ \\", "context_chars": 100, "text": "\n\tstep = 1.0 / (double)num_steps;\n\tomp_set_num_threads(NUM_THREADS);\n\tstart_time = omp_get_wtime();\n: sum) private(x, i)\n\t{\n\t\tfor (i = 1; i <= num_steps; i++)\n\t\t{\n\t\t\tx = (i - 0.5) * step;\t\t// or (i+0.5)*step; if i starts from 0\n\t\t\tsum += 4.0 / (1.0 + x * x); // 4.0 and 1.0 ensure this is double\n\t\t}\n\t} #pragma omp parallel for reduction(+ \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rohinarora/OpenMP/Code/src/3_mandel_bug.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(c,eps)", "context_chars": 100, "text": "ntains the Mandelbrot set,\n// testing each point to see whether it is inside or outside the set.\n\nfor (i=0; i #pragma omp parallel for default(shared) private(c,eps)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/XiangRongLin/grayscale-conversion/cpu/algorithms/memory_simd_sse.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_thread_aligned = ((int)pixel_per_thread_unaligned / floats_per_operation) * floats_per_operation;\n\nfor (int thread = 0; thread < threads; thread++)\n {\n int end;\n if (thread + 1 == threads)\n {\n end = ((int)size / floats_per_operation) * floats_per_operation;\n }\n else\n {\n end = 
pixel_per_thread_aligned * (thread + 1);\n }\n\n __m128i r7_r6_r5_r4_r3_r2_r1_r0;\n __m128i g7_g6_g5_g4_g3_g2_g1_g0;\n __m128i b7_b6_b5_b4_b3_b2_b1_b0;\n\n\n\n for (int i = pixel_per_thread_aligned * thread; i < end; i += floats_per_operation)\n {\n __m128i r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0 = _mm_loadu_si128((__m128i *)&img[i * channels]); //Unaligned load of 16 uint8 elements\n __m128i b7_g7_r7_b6_g6_r6_b5_g5 = _mm_loadu_si128((__m128i *)&img[i * channels + 16]); //Unaligned load of (only) 8 uint8 elements (lower half of XMM register).\n\n //Separate RGB, and put together R elements, G elements and B elements (together in same XMM register).\n //Result is also unpacked from uint8 to uint16 elements.\n GatherRGBx8(r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,\n b7_g7_r7_b6_g6_r6_b5_g5,\n &r7_r6_r5_r4_r3_r2_r1_r0,\n &g7_g6_g5_g4_g3_g2_g1_g0,\n &b7_b6_b5_b4_b3_b2_b1_b0);\n\n //Calculate 8 Y elements.\n __m128i y7_y6_y5_y4_y3_y2_y1_y0 = Rgb2Yx8(r7_r6_r5_r4_r3_r2_r1_r0,\n g7_g6_g5_g4_g3_g2_g1_g0,\n b7_b6_b5_b4_b3_b2_b1_b0);\n\n //Pack uint16 elements to 16 uint8 elements (put result in single XMM register). Only lower 8 uint8 elements are relevant.\n __m128i j7_j6_j5_j4_j3_j2_j1_j0 = _mm_packus_epi16(y7_y6_y5_y4_y3_y2_y1_y0, y7_y6_y5_y4_y3_y2_y1_y0);\n\n //Store 8 elements of Y in row Y0, and 8 elements of Y in row Y1.\n _mm_storel_epi64((__m128i *)&result[i], j7_j6_j5_j4_j3_j2_j1_j0);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/XiangRongLin/grayscale-conversion/cpu/algorithms/openmp_baseline.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "line(unsigned char *img, int width, int height, int channels, int threads, unsigned char *result)\n{\nfor (int x = 0; x < width; x++)\n {\n for (int y = 0; y < height; y++)\n {\n result[y * width + x] =\n 0.2126 * img[(y * width + x) * channels] // red\n + 0.7152 * img[(y * width + x) * channels + 1] // green\n + 0.0722 * img[(y * width + x) * channels + 2]; // blue\n }\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/XiangRongLin/grayscale-conversion/cpu/algorithms/memory.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nnels, int threads, unsigned char *result)\n{\n int pixel_per_thread = (width * height) / threads;\nfor (int thread = 0; thread < threads; thread++)\n {\n int end;\n if (thread + 1 == threads)\n {\n end = width * height;\n }\n else\n {\n end = pixel_per_thread * (thread + 1);\n }\n\n for (int i = pixel_per_thread * thread; i < end; i++)\n {\n result[i] =\n 0.2126 * img[(i * channels)] // red\n + 0.7152 * img[(i * channels) + 1] // green\n + 0.0722 * img[(i * channels) + 2]; // blue\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/XiangRongLin/grayscale-conversion/cpu/algorithms/memory_simd_fma.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0.7152, 0.7152, 0.7152, 0.7152);\n __m128 b_factor = _mm_set_ps(0.0722, 0.0722, 0.0722, 0.0722);\n\nfor (int thread = 0; thread < threads; thread++)\n {\n int end;\n if (thread + 1 == threads)\n {\n end = ((int)size / floats_per_operation) * floats_per_operation;\n }\n else\n {\n end = pixel_per_thread_aligned * (thread + 1);\n }\n\n __m128 r_vector, g_vector, b_vector, gray_vector;\n __m128i gray_vector_int;\n for (int i = pixel_per_thread_aligned * thread; i < end; i += 
floats_per_operation)\n {\n r_vector = _mm_set_ps(img[(i * channels)], img[(i + 1) * channels], img[(i + 2) * channels], img[(i + 3) * channels]);\n g_vector = _mm_set_ps(img[(i * channels) + 1], img[(i + 1) * channels + 1], img[(i + 2) * channels + 1], img[(i + 3) * channels + 1]);\n b_vector = _mm_set_ps(img[(i * channels) + 2], img[(i + 1) * channels + 2], img[(i + 2) * channels + 2], img[(i + 3) * channels + 2]);\n\n // calculate gray value with FMA\n gray_vector = _mm_setzero_ps();\n gray_vector = _mm_fmadd_ps(r_vector, r_factor, gray_vector);\n gray_vector = _mm_fmadd_ps(g_vector, g_factor, gray_vector);\n gray_vector = _mm_fmadd_ps(b_vector, b_factor, gray_vector);\n\n // convert float to int and store it\n // https://stackoverflow.com/q/29856006\n gray_vector_int = _mm_cvtps_epi32(gray_vector);\n gray_vector_int = _mm_packus_epi32(gray_vector_int, gray_vector_int);\n gray_vector_int = _mm_packus_epi16(gray_vector_int, gray_vector_int);\n\n *(int *)(&result[i]) = _mm_cvtsi128_si32(gray_vector_int);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/XiangRongLin/grayscale-conversion/cpu/algorithms/memory_simd_avx.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "er_thread_aligned = ((int)pixel_per_thread_unaligned / pixel_per_iteration) * pixel_per_iteration;\n\nfor (int thread = 0; thread < threads; thread++)\n {\n int end;\n if (thread + 1 == threads)\n {\n end = ((int)size / pixel_per_iteration) * pixel_per_iteration;\n }\n else\n {\n end = pixel_per_thread_aligned * (thread + 1);\n }\n\n __m256i rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0;\n __m256i gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0;\n __m256i bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0;\n\n for (int i = pixel_per_thread_aligned * thread; i < end; i += pixel_per_iteration)\n {\n const int pixel_index = i * channels;\n __m256i gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0 = _mm256_load_si256((__m256i *)&img[pixel_index]);\n // Load the next 32 bytes, of which on the first 16 are used\n __m256i bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA = _mm256_load_si256((__m256i *)&img[pixel_index + 32]);\n\n //Separate RGB, and put together R elements, G elements and B elements (together in same XMM register).\n //Result is also unpacked from uint8 to uint16 elements.\n GatherRGBx8_avx(gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,\n bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA,\n &rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,\n &gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,\n &bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);\n\n //Calculate 16 Y elements.\n __m256i y7_y6_y5_y4_y3_y2_y1_y0 = Rgb2Yx8_avx(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,\n gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,\n bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);\n\n //Calculate another 16 elements, because the store operation _mm256_storeu_si256 can store 32*8=256 bits at once\n // skip 16*3=48 bytes\n // Because of this, can't use _mm256_load_si256 where address needs to be 32-byte aligned\n gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0 = _mm256_loadu_si256((__m256i *)&img[pixel_index + 48]);\n // skip 16*3+32=80 bytes\n bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA = _mm256_loadu_si256((__m256i *)&img[pixel_index + 80]);\n\n 
GatherRGBx8_avx(gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,\n bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA,\n &rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,\n &gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,\n &bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);\n\n __m256i yF_yE_yD_yC_yB_yA_y9_y8 = Rgb2Yx8_avx(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,\n gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,\n bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);\n\n __m256i yF_yE_yD_yC_y7_y6_y5_y4_yB_yA_y9_y8_y3_y2_y1_y0 = _mm256_packus_epi16(y7_y6_y5_y4_y3_y2_y1_y0, yF_yE_yD_yC_yB_yA_y9_y8);\n\n __m256i yF_yE_yD_yC_yB_yA_y9_y8_y7_y6_y5_y4_y3_y2_y1_y0 = _mm256_permute4x64_epi64(\n yF_yE_yD_yC_y7_y6_y5_y4_yB_yA_y9_y8_y3_y2_y1_y0,\n _MM_SHUFFLE(3, 1, 2, 0));\n\n _mm256_store_si256((__m256i *)&result[i], yF_yE_yD_yC_yB_yA_y9_y8_y7_y6_y5_y4_y3_y2_y1_y0);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/carlotacb/PAR-Laboratoris/LAB 4/session_codes/lab4/multisort-omp-op1-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ase\n\t\tbasicsort(n, data);\n\t}\n}\n\nstatic void initialize(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n if (i==0) {\n data[i] = rand();\n } else {\n data[i] = ((data[i-1]+1) * i * 104723L) % N;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/carlotacb/PAR-Laboratoris/LAB 4/session_codes/lab4/multisort-omp-op1-2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * i * 104723L) % N;\n }\n }\n}\n\nstatic void clear(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n data[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/carlotacb/PAR-Laboratoris/LAB 3/codis/for/mandel-omp-for-row.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) private(col)", "context_chars": 100, "text": "or)\n#else\n int ** output) \n#endif\n\n{\n /* Calculate points and save/display */\n for (row = 0; row < height; ++row) {\n for (col = 0; col < width; ++col) {\n complex z, c;\n\n \n z.real = z.imag = 0;\n /* Scale display coordinates to actual region */\n \n\t c.real = real_min + ((double) col * scale_real);\n c.imag = imag_min + ((double) (height-1-row) * scale_imag);\n /* height-1-row so y axis displays\n * with larger values at top\n */\n\t \n /* Calculate z0, z1, .... 
until divergence or maximum iterations */\n int k = 0;\n double lengthsq, temp;\n do {\n temp = z.real*z.real - z.imag*z.imag + c.real;\n z.imag = 2*z.real*z.imag + c.imag;\n z.real = temp;\n lengthsq = z.real*z.real + z.imag*z.imag;\n ++k;\n } while (lengthsq < (N*N) && k < maxiter);\n\n#if _DISPLAY_\n /* Scale color and display point */\n long color = (long) ((k-1) * scale_color) + min_color;\n if (setup_return == EXIT_SUCCESS) {\n XSetForeground (display, gc, color);\n XDrawPoint (display, win, gc, col, row);\n }\n#else\n\t output[row][col]=k;\n\n }\n } #pragma omp parallel for schedule(runtime) private(col)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/carlotacb/PAR-Laboratoris/LAB 3/codis/for/mandel-omp-for-point.c", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) firstprivate(row)", "context_chars": 100, "text": "dif\n\n{\n /* Calculate points and save/display */\n for (row = 0; row < height; ++row) {\n for (col = 0; col < width; ++col) {\n complex z, c;\n\n \n z.real = z.imag = 0;\n /* Scale display coordinates to actual region */\n \n\t c.real = real_min + ((double) col * scale_real);\n c.imag = imag_min + ((double) (height-1-row) * scale_imag);\n /* height-1-row so y axis displays\n * with larger values at top\n */\n\t \n /* Calculate z0, z1, .... until divergence or maximum iterations */\n int k = 0;\n double lengthsq, temp;\n do {\n temp = z.real*z.real - z.imag*z.imag + c.real;\n z.imag = 2*z.real*z.imag + c.imag;\n z.real = temp;\n lengthsq = z.real*z.real + z.imag*z.imag;\n ++k;\n } while (lengthsq < (N*N) && k < maxiter);\n\n#if _DISPLAY_\n /* Scale color and display point */\n long color = (long) ((k-1) * scale_color) + min_color;\n if (setup_return == EXIT_SUCCESS) {\n XSetForeground (display, gc, color);\n XDrawPoint (display, win, gc, col, row);\n }\n#else\n\t output[row][col]=k;\n\n } #pragma omp parallel for schedule(runtime) firstprivate(row)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/gurbaaz27/CS433A-Design-Exercises/assignment3/Q2_openmp.cpp", "omp_pragma_line": "#pragma omp parallel for num_threads (t) private(i, j)", "context_chars": 100, "text": " = (float)(i)/(n*n);\n }\n\n gettimeofday(&tv0, &tz0);\n \n // row version\n int i, j;\n for (i = 0; i < n; i++) {\n for (j = 0; j < n; j++) {\n y[i] += A[i][j] * x[j];\n }\n } #pragma omp parallel for num_threads (t) private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arslansadiq/Parallel-Programming-TUM/Assignment8_Loop_Fusion/student/loop_fusion_par.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_threads)", "context_chars": 100, "text": "gned long **a, unsigned long **b, unsigned long **c, unsigned long **d, int N, int num_threads) {\n\n\tfor (int i = 1; i < N; i++) {\n\n\t\tint j = 1;\n\t\ta[i][j] = 2 * b[i][j];\n\t\td[i][j] = a[i][j] * c[i][j];\n\t\tc[i][j - 1] = a[i][j - 1] - 2 * b[i][j + 1];\n\n\t\tfor (j = 2; j < N - 1; j++) {\n\t\t\ta[i][j] = 2 * b[i][j];\n\t\t\td[i][j] = a[i][j] * c[i][j];\n\n\t\t\tc[i][j - 1] = 2 * b[i][j - 1] - 2 * b[i][j + 1];\n\t\t}\n\n\t\tj = N - 1;\n\t\ta[i][j] = 2 * b[i][j];\n\t\td[i][j] = a[i][j] * c[i][j];\n\t\tc[i][j - 1] = 2 * b[i][j - 1] - a[i][j + 1];\n\t} #pragma omp parallel for num_threads(num_threads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arslansadiq/Parallel-Programming-TUM/Assignment4_Openmp_1/student/x_gradient_par.h", "omp_pragma_line": "#pragma omp parallel for schedule (dynamic,5)", "context_chars": 100, 
"text": "typename channel_type::type dst_channel_t;\n\n omp_set_num_threads(num_threads);\n \n for (int y=0; y());\n }\n } #pragma omp parallel for schedule (dynamic,5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arslansadiq/Parallel-Programming-TUM/Assignment12_Hybrid/helper.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_threads) reduction(+:heat)", "context_chars": 100, "text": "t_omp(double *h_new, int n, int num_threads)\n{\n double heat = 0.0; // total heat in system\n //for (int i = 1; i < n + 1; ++i)\n {\n for (int j = 1; j < n + 1; ++j)\n {\n heat += h_new[map(i, j, n+2)];\n }\n } #pragma omp parallel for num_threads(num_threads) reduction(+:heat)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arslansadiq/Parallel-Programming-TUM/Assignment6_OpenMP_First touch/student/laplace_par.h", "omp_pragma_line": "#pragma omp parallel for schedule(static) proc_bind(spread)", "context_chars": 100, "text": "\ninline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2])\n{\n for (int i = 0; i < SIZE + 2; i++)\n for (int j = 0; j < SIZE + 2; j++)\n {\n a[i][j] = 0.0;\n b[i][j] = 0.0;\n } #pragma omp parallel for schedule(static) proc_bind(spread)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/arslansadiq/Parallel-Programming-TUM/Assignment6_OpenMP_First touch/student/laplace_par.h", "omp_pragma_line": "#pragma omp parallel for schedule(static) proc_bind(spread)", "context_chars": 100, "text": "double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n)\n{\n if (n % 2 == 0)\n {\n \tfor (int i = 1; i < SIZE + 1; i++)\n for (int j = 1; j < SIZE + 1; j++)\n b[i][j] = (a[i + 1][j] + a[i - 1][j] + a[i][j - 1] + a[i][j + 1]) / 4.0;\n }\n else\n {\n \t#pragma omp parallel for schedule(static) proc_bind(spread)\n for (int i = 1; i < SIZE + 1; i++)\n for (int j = 1; j < SIZE + 1; j++)\n a[i][j] = (b[i + 1][j] + b[i - 1][j] + b[i][j - 1] + b[i][j + 1]) / 4.0;\n } #pragma omp parallel for schedule(static) proc_bind(spread)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tudasc/MPI-Corrbench/micro-benches/0-level/openmp/threading/missing_init_thread.c", "omp_pragma_line": "#pragma omp parallel for num_threads(NUM_THREADS)", "context_chars": 100, "text": "uffer_out[10], buffer_in[10];\n\n MPI_Init(&argc, &argv);\n MPI_Comm_rank(MPI_COMM_WORLD, &myRank);\n\nfor (int i = 0; i < 10; i++) {\n buffer_out[i] = i * 10;\n } #pragma omp parallel for num_threads(NUM_THREADS)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab5/code/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "matrix into another\n */\n\nvoid copy_mat (double *u, double *v, unsigned sizex, unsigned sizey)\n{\n for (int i = 1; i <= sizex - 2; i++) {\n for (int j = 1; j <= sizey-2; j++) {\n v[i * sizey + j] = u[i * sizey + j];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab5/code/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for private(diff) reduction(+:sum)", "context_chars": 100, "text": "ed sizex, unsigned sizey)\n{\n double diff, sum=0.0;\n\n int howmany = omp_get_max_threads();\n for (int blockid = 0; blockid < howmany; ++blockid) {\n \tint i_start = lowerb(blockid, howmany, sizex);\n \tint i_end = upperb(blockid, howmany, sizex);\n \tfor (int i = max(1, i_start); i <= 
min(sizex - 2, i_end); i++) {\n\t for (int j = 1; j <= sizey - 2; j++) {\n\t\t utmp[i * sizey + j]= 0.25 * ( u[i * sizey + (j - 1)] + // left\n\t\t u[i * sizey + (j + 1)] + // right\n\t\t\t\t\t u[(i - 1) * sizey + j] + // top\n\t\t\t\t\t u[(i + 1) * sizey + j]); // bottom\n\t\t diff = utmp[i * sizey + j] - u[i * sizey + j];\n\t\t sum += diff * diff;\n\t\t\t}\n \t}\n } #pragma omp parallel for private(diff) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab5/code/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(diff, unew) reduction(+:sum)", "context_chars": 100, "text": " for(int i = 0; i < howmany; ++i) {\n \tprocessedBlocks[i] = 0;\n }\n int nBlocs = 32;\n\n for (int i = 0; i < howmany; ++i) {\n int ii_start = lowerb(i, howmany, sizex);\n int ii_end = upperb(i, howmany, sizex);\n for (int j = 0; j < nBlocs; j++){\n int jj_start = lowerb(j, nBlocs, sizey);\n int jj_end = upperb(j, nBlocs, sizey);\n if(i > 0) {\n while(processedBlocks[i - 1] <= j) {\n\t\t \t\t#pragma omp flush\n }\n }\n for (int ii = max(1, ii_start); ii <= min(sizex - 2, ii_end); ii++) {\n for(int jj = max(1, jj_start); jj <= min(sizey - 2, jj_end); jj++) {\n unew = 0.25* (u[ii * sizey + (jj - 1)] + // left\n u[ii * sizey + (jj + 1)] + // right\n u[(ii - 1) * sizey + jj] + // top\n u[(ii + 1) * sizey + jj]); // bottom\n diff = unew - u[ii * sizey + jj];\n sum += diff * diff;\n u[ii * sizey + jj] = unew;\n }\n }\n ++processedBlocks[i];\n\t #pragma omp flush\n }\n } #pragma omp parallel for schedule(static) private(diff, unew) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab4/code/multisort-omp-tree-optional.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ase\n\t\tbasicsort(n, data);\n\t}\n}\n\nstatic void initialize(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n if (i==0) {\n data[i] = rand();\n } else {\n data[i] = ((data[i-1]+1) * i * 104723L) % N;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab4/code/multisort-omp-tree-optional.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * i * 104723L) % N;\n }\n }\n}\n\nstatic void clear(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n data[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab4/code/multisort-omp-tree.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 8)", "context_chars": 100, "text": "ase\n\t\tbasicsort(n, data);\n\t}\n}\n\nstatic void initialize(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n if (i==0) {\n data[i] = rand();\n } else {\n data[i] = ((data[i-1]+1) * i * 104723L) % N;\n }\n } #pragma omp parallel for schedule(static, 8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/deliverables/par2312-lab4/code/multisort-omp-tree.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 8)", "context_chars": 100, "text": " * i * 104723L) % N;\n }\n }\n}\n\nstatic void clear(long length, T data[length]) {\n long i;\n for (i = 0; i < length; i++) {\n data[i] = 0;\n } #pragma omp parallel for schedule(static, 8)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab5/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "matrix into another\n */\n\nvoid copy_mat (double *u, double *v, unsigned sizex, unsigned sizey)\n{\n for (int i=1; i<=sizex-2; i++)\n for (int j=1; j<=sizey-2; j++) \n v[ i*sizey+j ] = u[ i*sizey+j ];\n}\n\n/*\n * Blocked Jacobi solver: one iteration step\n */\ndouble relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey)\n{\n double diff, sum=0.0;\n \n int howmany = omp_get_max_threads();\n #pragma omp parallel for private(diff) reduction(+:sum)\n for (int blockid = 0; blockid < howmany; ++blockid) {\n int i_start = lowerb(blockid, howmany, sizex);\n int i_end = upperb(blockid, howmany, sizex);\n for (int i=max(1, i_start); i<= min(sizex-2, i_end); i++) {\n for (int j=1; j<= sizey-2; j++) {\n\t utmp[i*sizey+j]= 0.25 * ( u[i * sizey + (j - 1)]+ // left\n\t u[i * sizey + (j + 1)]+ // right\n\t\t\t\t u[(i - 1) * sizey + j]+ // top\n\t\t\t\t u[(i + 1) * sizey + j]); // bottom\n\t diff = utmp[i*sizey+j] - u[i*sizey + j];\n\t sum += diff * diff; \n\t }\n }\n }\n\n return sum;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab5/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for private(diff) reduction(+:sum)", "context_chars": 100, "text": " sizex, unsigned sizey)\n{\n double diff, sum=0.0;\n \n int howmany = omp_get_max_threads();\n for (int blockid = 0; blockid < howmany; ++blockid) {\n int i_start = lowerb(blockid, howmany, sizex);\n int i_end = upperb(blockid, howmany, sizex);\n for (int i=max(1, i_start); i<= min(sizex-2, i_end); i++) {\n for (int j=1; j<= sizey-2; j++) {\n\t utmp[i*sizey+j]= 0.25 * ( u[i * sizey + (j - 1)]+ // left\n\t u[i * sizey + (j + 1)]+ // right\n\t\t\t\t u[(i - 1) * sizey + j]+ // top\n\t\t\t\t u[(i + 1) * sizey + j]); // bottom\n\t diff = utmp[i*sizey+j] - u[i*sizey + j];\n\t sum += diff * diff; \n\t }\n }\n } #pragma omp parallel for private(diff) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab5/solver-omp.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) private(diff,unew) reduction(+:sum)", "context_chars": 100, "text": "ny];\n for(int i = 0; i < howmany; ++i) processedBlocks[i] = 0;\n int nBlocs = 8;\n \n \n for (int i = 0; i < howmany; ++i) {\n int ii_start = lowerb(i, howmany, sizex);\n int ii_end = upperb(i, howmany, sizex);\n for (int j = 0; j < nBlocs; j++){\n int jj_start = lowerb(j,nBlocs,sizey);\n int jj_end = upperb(j,nBlocs,sizey);\n if(i > 0){\n while(processedBlocks[i-1]<=j){\n\t\t #pragma omp flush\n }\n }\n \n for (int ii=max(1, ii_start); ii<= min(sizex-2, ii_end); ii++) {\n for(int jj= max(1,jj_start); jj<= min(sizey-2, jj_end); jj++){\n unew = 0.25* (u[ii * sizey + (jj-1)] + // left\n u[ii * sizey + (jj+1)] + // right\n u[(ii-1) * sizey + jj] + // top\n u[(ii+1) * sizey + jj]); // bottom\n diff = unew - u[ii * sizey + jj];\n sum += diff*diff;\n u[ii*sizey+jj] = unew;\n }\n }\n ++processedBlocks[i];\n\t #pragma omp flush\n }\n } #pragma omp parallel for schedule(static) private(diff,unew) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab2/openmp/worksharing/4.collapse.c", "omp_pragma_line": "#pragma omp parallel for private(j) ", "context_chars": 100, "text": " to make it correct. 
*/\n\nint main() \n{\n int i,j;\n\n omp_set_num_threads(8);\n for (i=0; i < N; i++) {\n for (j=0; j < N; j++) {\n\t int id=omp_get_thread_num();\n\t printf(\"(%d) Iter (%d %d)\\n\",id,i,j);\t\n\t}\n } #pragma omp parallel for private(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab2/openmp/worksharing/6.doacross.c", "omp_pragma_line": "#pragma omp parallel for ordered(1) schedule(dynamic)", "context_chars": 100, "text": " float a[N], b[N], c[N];\n float a1[M][M], b1[M][M], c1[M][M];\n\n omp_set_num_threads(8);\n for (int i=1; i #pragma omp parallel for ordered(1) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab2/openmp/worksharing/6.doacross.c", "omp_pragma_line": "#pragma omp parallel for ordered(2) schedule(dynamic)", "context_chars": 100, "text": "b[i] = a[i] * b[i-1];\n\t }\n\t#pragma omp ordered depend(source)\n\n\tc[i] = b[i] * 0.1234;\n }\n\n for (int i=1; i #pragma omp parallel for ordered(2) schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/felixarpa/PAR-Lab/boada/lab2/openmp/worksharing/1.for.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\",id,i);\t\n }\n }\n\n printf(\"\\nGoing to distribute iterations in second loop ...\\n\");\n for (i=0; i < N+3; i++) {\n\tint id=omp_get_thread_num();\n\tprintf(\"(%d) gets iteration %d\\n\",id,i);\t\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphCSR.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "es_array->inverse_label_array;\n label_array_el = graphCSR->sorted_edges_array->label_array;\n\n for (v = 0; v < num_vertices; ++v)\n {\n uint32_t u = label_array_el[v];\n uint32_t t = label_array_iel[v];\n\n inverse_label_array_el[u] = t;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphCSR.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " uint32_t t = label_array_iel[v];\n\n inverse_label_array_el[u] = t;\n }\n\n#if DIRECTED\n for (v = 0; v < num_vertices; ++v)\n {\n uint32_t u = label_array_el[v];\n uint32_t t = label_array_iel[v];\n\n inverse_label_array_iel[t] = u;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "geList *edgeList)\n{\n\n uint32_t i;\n uint32_t src;\n\n#if DIRECTED\n uint32_t dest;\n#endif\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n src = edgeList->edges_array_src[i];\n\n #pragma omp atomic update\n graphAdjArrayList->vertices[src].out_degree++;\n\n#if DIRECTED\n dest = edgeList->edges_array_dest[i];\n\n #pragma omp atomic update\n graphAdjArrayList->vertices[dest].in_degree++;\n\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "djArrayList *graphAdjArrayList, struct EdgeList *edgeList)\n{\n\n uint32_t i;\n uint32_t src;\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n\n #pragma omp atomic 
update\n graphAdjArrayList->vertices[src].out_degree++;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "st *graphAdjArrayList, struct EdgeList *inverseEdgeList)\n{\n\n uint32_t i;\n uint32_t dest;\n\n for(i = 0; i < inverseEdgeList->num_edges; i++)\n {\n\n dest = inverseEdgeList->edges_array_src[i];\n\n #pragma omp atomic update\n graphAdjArrayList->vertices[dest].in_degree++;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " *graphAdjArrayListEdgeAllocate(struct GraphAdjArrayList *graphAdjArrayList)\n{\n\n uint32_t v;\n for(v = 0; v < graphAdjArrayList->num_vertices; v++)\n {\n\n adjArrayListCreateNeighbourList(&(graphAdjArrayList->vertices[v]));\n\n#if DIRECTED\n graphAdjArrayList->vertices[v].in_degree = 0;\n\n graphAdjArrayList->vertices[v].out_degree = 0; // will be used as an index to edge array outnode\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "geAllocateInodes(struct GraphAdjArrayList *graphAdjArrayList)\n{\n#if DIRECTED\n uint32_t v;\n // for(v = 0; v < graphAdjArrayList->num_vertices; v++)\n {\n\n adjArrayListCreateNeighbourListInNodes(&(graphAdjArrayList->vertices[v]));\n\n\n graphAdjArrayList->vertices[v].in_degree = 0;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rrayListEdgeAllocateOutNodes(struct GraphAdjArrayList *graphAdjArrayList)\n{\n\n uint32_t v;\n // for(v = 0; v < graphAdjArrayList->num_vertices; v++)\n {\n\n adjArrayListCreateNeighbourListOutNodes(&(graphAdjArrayList->vertices[v]));\n\n graphAdjArrayList->vertices[v].out_degree = 0; // will be used as an index to edge array outnode\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjArrayList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n\n#if DIRECTED\n uint32_t dest;\n uint32_t in_degree;\n#endif\n\n uint32_t out_degree;\n\n // for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n\n\n // out_degree = __sync_fetch_and_add(&(graphAdjArrayList->vertices[src].out_degree), 1);y\n\n out_degree = graphAdjArrayList->vertices[src].out_degree;\n\n graphAdjArrayList->vertices[src].outNodes->edges_array_src[out_degree] = edgeList->edges_array_src[i];\n graphAdjArrayList->vertices[src].outNodes->edges_array_dest[out_degree] = edgeList->edges_array_dest[i];\n#if WEIGHTED\n graphAdjArrayList->vertices[src].outNodes->edges_array_weight[out_degree] = edgeList->edges_array_weight[i];\n\n\n graphAdjArrayList->vertices[src].out_degree++;\n\n\n#if DIRECTED\n dest = edgeList->edges_array_dest[i];\n\n in_degree = __sync_fetch_and_add(&(graphAdjArrayList->vertices[src].in_degree), 1);\n\n 
graphAdjArrayList->vertices[dest].inNodes->edges_array_src[in_degree] = edgeList->edges_array_src[i];\n graphAdjArrayList->vertices[dest].inNodes->edges_array_dest[in_degree] = edgeList->edges_array_dest[i];\n#if WEIGHTED\n graphAdjArrayList->vertices[dest].inNodes->edges_array_weight[in_degree] = edgeList->edges_array_weight[i];\n\n\n\n\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/vertex.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)", "context_chars": 100, "text": " = i;\n }\n }\n }\n }\n\n#if DIRECTED\n if(!inverse)\n {\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n graph->inverse_vertices->in_degree[vertex_id] = vertices->out_degree[vertex_id];\n } #pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/vertex.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)", "context_chars": 100, "text": "es->in_degree[vertex_id] = vertices->out_degree[vertex_id];\n }\n\n }\n else\n {\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n graph->vertices->in_degree[vertex_id] = vertices->out_degree[vertex_id];\n } #pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/bitmap.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ord - 1) / kBitsPerWord)));\n\n // uint32_t *word = bitmap->bitarray;\n // uint32_t i;\n\n // // for(i= 0 ; i <((bitmap->size+kBitsPerWord - 1)/kBitsPerWord); i++){\n // word[i] = 0;\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/bitmap.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:numSetBits) schedule(dynamic,256)", "context_chars": 100, "text": "nt32_t getNumOfSetBits (struct Bitmap *bitmap)\n{\n\n uint32_t i;\n uint32_t numSetBits = 0;\n\n for(i = 0 ; i < (bitmap->size); i++)\n {\n if(getBit(bitmap, i))\n numSetBits++;\n } #pragma omp parallel for reduction(+:numSetBits) schedule(dynamic,256)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/worklist.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "2 = workList_temp;\n\n}\n\nvoid resetWorkList(uint8_t *workList, uint32_t size)\n{\n\n uint32_t i;\n\n for(i = 0; i < size ; i++)\n {\n workList[i] = 0;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/worklist.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ist[i] = 0;\n\n }\n\n\n}\n\nvoid setWorkList(uint8_t *workList, uint32_t size)\n{\n\n uint32_t i;\n\n for(i = 0; i < size ; i++)\n {\n workList[i] = 1;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions) private(i)", "context_chars": 100, "text": "itions = 0;\n 
totalPartitions = grid->num_partitions * grid->num_partitions;\n uint32_t i;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n grid->activePartitions[i] = 0;\n } #pragma omp parallel for default(none) shared(grid,totalPartitions) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)", "context_chars": 100, "text": " uint32_t i;\n uint32_t totalPartitions = 0;\n totalPartitions = grid->num_partitions;\n\n // for ( i = 0; i < totalPartitions; ++i)\n {\n\n Partition_idx = (row * totalPartitions) + i;\n\n if(grid->partitions[Partition_idx].edgeList->num_edges)\n {\n if(!getBit(grid->activePartitionsMap, Partition_idx))\n {\n setBitAtomic(grid->activePartitionsMap, Partition_idx);\n }\n }\n } #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)", "context_chars": 100, "text": " uint32_t i;\n uint32_t totalPartitions = 0;\n totalPartitions = grid->num_partitions;\n\n // for ( i = 0; i < totalPartitions; ++i)\n {\n\n Partition_idx = (row * totalPartitions) + i;\n if(grid->partitions[Partition_idx].edgeList->num_edges)\n {\n grid->activePartitions[Partition_idx] = 1;\n }\n } #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "id->num_vertices);\n grid->activePartitionsMap = newBitmap(totalPartitions);\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n\n grid->partitions[i].num_edges = 0;\n grid->partitions[i].num_vertices = 0; /* code */\n grid->activePartitions[i] = 0;\n\n\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(grid)", "context_chars": 100, "text": "->partitions[i].num_vertices = 0; /* code */\n grid->activePartitions[i] = 0;\n\n\n }\n\n\n for (i = 0; i < grid->num_vertices ; ++i)\n {\n\n grid->out_degree[i] = 0;\n grid->in_degree[i] = 0;\n\n } #pragma omp parallel for default(none) private(i) shared(grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,grid)", "context_chars": 100, "text": "Grid *grid, struct EdgeList *edgeList)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n #pragma omp atomic update\n grid->out_degree[src]++;\n\n #pragma omp atomic update\n grid->in_degree[dest]++;\n\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,grid)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "m_vertices = 0;\n uint32_t totalPartitions = grid->num_partitions * grid->num_partitions;\n\n // #pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)\n for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n }\n\n grid->partitions[j].num_vertices = num_vertices;\n grid->partitions[j].edgeList->num_vertices = num_vertices;\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)", "context_chars": 100, "text": "titions;\n\n // #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)\n for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n }\n\n grid->partitions[j].num_vertices = num_vertices;\n grid->partitions[j].edgeList->num_vertices = num_vertices;\n } #pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)", "context_chars": 100, "text": "dynamic,1024)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n } #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)", "context_chars": 100, "text": "titions;\n uint32_t num_vertices = grid->num_vertices;\n\n\n uint32_t row;\n uint32_t col;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n dest = 
edgeList->edges_array_dest[i];\n\n // __sync_fetch_and_add(&grid->out_degree[src],1);\n // __sync_fetch_and_add(&grid->in_degree[dest],1);\n\n row = getPartitionID(num_vertices, num_partitions, src);\n col = getPartitionID(num_vertices, num_partitions, dest);\n Partition_idx = (row * num_partitions) + col;\n\n // __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges,1);\n\n #pragma omp atomic update\n grid->partitions[Partition_idx].num_edges++;\n\n } #pragma omp parallel for default(none) private(i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Edge_idx,i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)", "context_chars": 100, "text": "tions;\n uint32_t num_vertices = grid->num_vertices;\n\n uint32_t row;\n uint32_t col;\n\n\n\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n row = getPartitionID(num_vertices, num_partitions, src);\n col = getPartitionID(num_vertices, num_partitions, dest);\n Partition_idx = (row * num_partitions) + col;\n\n Edge_idx = __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges, 1);\n\n grid->partitions[Partition_idx].edgeList->edges_array_src[Edge_idx] = edgeList->edges_array_src[i];\n grid->partitions[Partition_idx].edgeList->edges_array_dest[Edge_idx] = edgeList->edges_array_dest[i];\n#if WEIGHTED\n grid->partitions[Partition_idx].edgeList->edges_array_weight[Edge_idx] = edgeList->edges_array_weight[i];\n\n } #pragma omp parallel for default(none) private(Edge_idx,i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "{\n\n uint32_t i;\n uint32_t totalPartitions = grid->num_partitions * grid->num_partitions;\n\n for ( i = 0; i < totalPartitions; ++i)\n {\n\n grid->partitions[i].edgeList = newEdgeList(grid->partitions[i].num_edges);\n grid->partitions[i].edgeList->num_vertices = grid->partitions[i].num_vertices;\n grid->partitions[i].num_edges = 0;\n\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjLinkedList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ices = (struct AdjLinkedList *) my_malloc( V * sizeof(struct AdjLinkedList));\n\n\n uint32_t i;\n for(i = 0; i < V; i++)\n {\n\n graphAdjLinkedList->vertices[i].outNodes = NULL;\n graphAdjLinkedList->vertices[i].out_degree = 0;\n\n#if DIRECTED\n graphAdjLinkedList->vertices[i].inNodes = NULL;\n graphAdjLinkedList->vertices[i].in_degree = 0;\n\n\n graphAdjLinkedList->vertices[i].visited = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjLinkedList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " WEIGHTED\n graphAdjLinkedList->max_weight = edgeList->max_weight;\n#endif\n\n\n\n uint32_t i;\n for(i = 0; 
i < graphAdjLinkedList->num_vertices; i++)\n {\n\n graphAdjLinkedList->vertices[i].outNodes = NULL;\n graphAdjLinkedList->vertices[i].out_degree = 0;\n\n#if DIRECTED\n graphAdjLinkedList->vertices[i].inNodes = NULL;\n graphAdjLinkedList->vertices[i].in_degree = 0;\n\n\n graphAdjLinkedList->vertices[i].visited = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjLinkedList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ex_lock = (omp_lock_t *) my_malloc( graphAdjLinkedList->num_vertices * sizeof(omp_lock_t));\n\n\n\n for (i = 0; i < graphAdjLinkedList->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjLinkedList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raphAdjLinkedList->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n }\n\n // for(i = 0; i < edgeList->num_edges; i++)\n {\n\n adjLinkedListAddEdge(graphAdjLinkedList, edgeList, i, vertex_lock);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/graphAdjLinkedList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i++)\n {\n\n adjLinkedListAddEdge(graphAdjLinkedList, edgeList, i, vertex_lock);\n\n }\n\n for (i = 0; i < graphAdjLinkedList->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/arrayQueue.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n\n}\n\n\n\nvoid arrayQueueGenerateBitmap(struct ArrayQueue *q)\n{\n\n uint32_t v;\n uint32_t i;\n\n for(i = q->head ; i < q->tail; i++)\n {\n v = q->queue[i];\n setBitAtomic(q->q_bitmap, v);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/arrayQueue.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(q,b) private(v,i)", "context_chars": 100, "text": " arrayQueueToBitmap(struct ArrayQueue *q, struct Bitmap *b)\n{\n\n uint32_t v;\n uint32_t i;\n\n for(i = q->head ; i < q->tail; i++)\n {\n v = q->queue[i];\n setBitAtomic(b, v);\n } #pragma omp parallel for default(none) shared(q,b) private(v,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/arrayQueue.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(q,b,labels) private(v,i,inv_u) num_threads(num_threads_max)", "context_chars": 100, "text": "t v;\n uint32_t i;\n uint32_t inv_u;\n uint32_t num_threads_max = omp_get_max_threads();\n\n for(i = q->head ; i < q->tail; i++)\n {\n v = q->queue[i];\n inv_u = labels[v];\n setBitAtomic(b, inv_u);\n } #pragma omp parallel for default(none) shared(q,b,labels) private(v,i,inv_u) num_threads(num_threads_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t->edges_array_weight = (float *) my_malloc(num_edges * 
sizeof(float));\n#endif\n\n uint32_t i;\n for(i = 0; i < num_edges; i++)\n {\n newEdgeList->edges_array_dest[i] = 0;\n newEdgeList->edges_array_src[i] = 0;\n#if WEIGHTED\n newEdgeList->edges_array_weight[i] = 0;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "->inverse_label_array = (uint32_t *) my_malloc(tempEdgeList->num_vertices * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n tempEdgeList->mask_array[i] = edgeList->mask_array[i] ;\n tempEdgeList->label_array[i] = edgeList->label_array[i];\n tempEdgeList->inverse_label_array[i] = edgeList->inverse_label_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:num_vertices)", "context_chars": 100, "text": " uint32_t i;\n uint32_t num_vertices = 0;\n\n#if WEIGHTED\n float max_weight = 0;\n#endif\n\n // for(i = 0; i < num_edges; i++)\n {\n src = buf_pointer[((offset) * i) + 0];\n dest = buf_pointer[((offset) * i) + 1];\n // printf(\" %u %lu -> %lu \\n\",i,src,dest);\n#if DIRECTED\n if(!inverse)\n {\n if(symmetric)\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n edgeList->edges_array_src[i + (num_edges)] = dest;\n edgeList->edges_array_dest[i + (num_edges)] = src;\n\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n else\n {\n edgeList->edges_array_weight[i] = generateRandFloat(mt19937var);\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n\n\n }\n else\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n }\n else\n {\n edgeList->edges_array_weight[i] = generateRandFloat(mt19937var);\n }\n\n } // symmetric\n } // inverse\n else\n {\n if(symmetric)\n {\n edgeList->edges_array_src[i] = dest;\n edgeList->edges_array_dest[i] = src;\n edgeList->edges_array_src[i + (num_edges)] = src;\n edgeList->edges_array_dest[i + (num_edges)] = dest;\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n else\n {\n edgeList->edges_array_weight[i] = generateRandFloat(mt19937var);\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n\n }\n else\n {\n edgeList->edges_array_src[i] = dest;\n edgeList->edges_array_dest[i] = src;\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n }\n else\n {\n edgeList->edges_array_weight[i] = generateRandFloat(mt19937var);\n }\n\n }// symmetric\n }// inverse\n#else\n if(symmetric)\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n edgeList->edges_array_src[i + (num_edges)] = dest;\n edgeList->edges_array_dest[i + (num_edges)] = src;\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = 1;\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n else\n {\n 
edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n edgeList->edges_array_weight[i + (num_edges)] = edgeList->edges_array_weight[i];\n }\n\n }\n else\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = buf_pointer_float[((offset) * i) + 2];\n }\n else\n {\n edgeList->edges_array_weight[i] = generateRandFloat(mt19937var);\n }\n\n }\n\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(edgeList->edges_array_src[i], edgeList->edges_array_dest[i]));\n\n#if WEIGHTED\n max_weight = maxTwoFloats(max_weight, edgeList->edges_array_weight[i]);\n\n\n } #pragma omp parallel for reduction(max:num_vertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "List->inverse_label_array = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n edgeList->mask_array[i] = 0;\n edgeList->label_array[i] = i;\n edgeList->inverse_label_array[i] = i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " {\n edgeList->mask_array = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for ( i = 0; i < num_vertices; ++i)\n {\n edgeList->mask_array[i] = edgeListmem->mask_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "{\n edgeList->label_array = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for ( i = 0; i < num_vertices; ++i)\n {\n edgeList->label_array[i] = edgeListmem->label_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " edgeList->inverse_label_array = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for ( i = 0; i < num_vertices; ++i)\n {\n edgeList->inverse_label_array[i] = edgeListmem->inverse_label_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/edgeList.c", "omp_pragma_line": "#pragma omp parallel for private(src,dest)", "context_chars": 100, "text": " edgeList->inverse_label_array[i] = edgeListmem->inverse_label_array[i];\n }\n }\n\n for(i = 0; i < num_edges; i++)\n {\n src = edgeListmem->edges_array_src[i];\n dest = edgeListmem->edges_array_dest[i];\n#if WEIGHTED\n float weight = edgeListmem->edges_array_weight[i];\n\n // printf(\" %u %lu -> %lu \\n\",src,dest);\n#if DIRECTED\n if(!inverse)\n {\n if(symmetric)\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n\n\n#if WEIGHTED\n\n edgeList->edges_array_weight[i] = weight;\n\n\n\n }\n else\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n\n#if WEIGHTED\n if(weighted)\n {\n edgeList->edges_array_weight[i] = weight;\n }\n else\n {\n edgeList->edges_array_weight[i] = weight;\n }\n\n } // 
symmetric\n } // inverse\n else\n {\n if(symmetric)\n {\n edgeList->edges_array_src[i] = dest;\n edgeList->edges_array_dest[i] = src;\n\n#if WEIGHTED\n\n edgeList->edges_array_weight[i] = weight;\n\n\n }\n else\n {\n edgeList->edges_array_src[i] = dest;\n edgeList->edges_array_dest[i] = src;\n#if WEIGHTED\n\n edgeList->edges_array_weight[i] = weight;\n\n\n }// symmetric\n }// inverse\n#else\n if(symmetric)\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n\n#if WEIGHTED\n\n edgeList->edges_array_weight[i] = weight;\n\n\n }\n else\n {\n edgeList->edges_array_src[i] = src;\n edgeList->edges_array_dest[i] = dest;\n#if WEIGHTED\n\n edgeList->edges_array_weight[i] = weight;\n\n\n }\n\n } #pragma omp parallel for private(src,dest)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/arrayStack.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n\n}\n\n\n\nvoid arrayStackGenerateBitmap(struct ArrayStack *q)\n{\n\n uint32_t v;\n uint32_t i;\n\n for(i = q->head ; i < q->tail; i++)\n {\n v = q->Stack[i];\n setBitAtomic(q->q_bitmap, v);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/structures/arrayStack.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(q,b) private(v,i)", "context_chars": 100, "text": " arrayStackToBitmap(struct ArrayStack *q, struct Bitmap *b)\n{\n\n uint32_t v;\n uint32_t i;\n\n for(i = q->head ; i < q->tail; i++)\n {\n v = q->Stack[i];\n setBitAtomic(b, v);\n } #pragma omp parallel for default(none) shared(q,b) private(v,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "eof(uint32_t));\n labelsTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < num_vertices; v++)\n {\n pageRanksFP[v] = FloatToFixed32SORT(pageRanks[v]);\n pageRanksFPTemp[v] = 0;\n labelsTemp[v] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < edgeList->num_vertices; v++)\n {\n labelsInverse[v] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n\n labelsInverse = radixSortEdgesByDegree(degrees, labelsInverse, edgeList->num_vertices);\n\n for(v = 0; v < edgeList->num_vertices; v++)\n {\n labels[labelsInverse[v]] = edgeList->num_vertices - 1 - v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } 
#pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * sizeof(uint32_t));\n thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n degrees[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ByDegree(degreesHot[num_buckets - 1], verticesHot[num_buckets - 1], sizeHot[num_buckets - 1]);\n\n for(v = 0; v < sizeHot[1]; v++)\n {\n labels[verticesHot[1][v]] = sizeHot[1] - 1 - v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0; v < sizeHot[1]; v++)\n {\n labels[verticesHot[1][v]] = sizeHot[1] - 1 - v;\n }\n\n for(v = 0; v < sizeHot[0]; v++)\n {\n labels[verticesHot[0][v]] = sizeHot[1] + (v);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * sizeof(uint32_t));\n thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n degrees[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" 
-----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode)", "context_chars": 100, "text": "ct EdgeList *edgeList, uint32_t mmode)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n switch(mmode)\n {\n case 1 :\n case 3 :\n {\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n case 2 :\n case 4 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n }\n break;\n case 5 :\n case 6 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n default :\n {\n #pragma omp atomic update\n degrees[src]++;\n }// out-degree\n }\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ VERTEX_VALUE_LUKEWARM_U32\n }\n\n cache_regions[3] = UINT32_MAX; // VERTEX_CACHE_COLD_U32\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n mask_array[i] = VERTEX_CACHE_COLD_U32;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nds(timer));\n printf(\" -----------------------------------------------------\\n\");\n }\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n edgeList->mask_array[i] = mask_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode)", "context_chars": 100, "text": "lmode)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n\n\n if(lmode != 10)\n {\n for(i = 0; i < edgeList->num_edges; i++)\n {\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n switch(lmode)\n {\n case 1 :\n case 4 :\n case 6 :\n case 8 :\n {\n #pragma omp atomic update\n degrees[src]++;\n } // degree\n break;\n case 2 :\n case 5 :\n case 7 :\n case 9 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n }\n break;\n case 3 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n default :\n {\n #pragma omp atomic update\n degrees[src]++;\n }// out-degree\n }\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mt19937var)", "context_chars": 100, "text": "*) my_malloc(sizeof(mt19937state));\n initializeMersenneState (mt19937var, 27491095);\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n 
degrees[i] = (generateRandInt(mt19937var) % edgeList->num_vertices) + omp_get_thread_num();\n } #pragma omp parallel for firstprivate(mt19937var)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ruct EdgeList *relabelEdgeList(struct EdgeList *edgeList, uint32_t *labels)\n{\n\n uint32_t i;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n uint32_t src;\n uint32_t dest;\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n edgeList->edges_array_src[i] = labels[src];\n edgeList->edges_array_dest[i] = labels[dest];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ruct EdgeList *maskEdgeList(struct EdgeList *edgeList, uint32_t *mask_array)\n{\n uint32_t i;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n uint32_t src;\n uint32_t dest;\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n edgeList->edges_array_src[i] = src | mask_array[src];\n edgeList->edges_array_dest[i] = dest | mask_array[dest];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " break;\n\n }\n fclose(pText);\n\n\n\n edgeList = relabelEdgeList(edgeList, labels);\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/countsort.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_weight[i];\n#endif\n\n vertex_count[(t_id * num_vertices) + key]++;\n\n }\n\n }\n\n for(i = 0; i < num_edges; i++)\n {\n edgeList->edges_array_dest[i] = sorted_edges_array->edges_array_dest[i];\n edgeList->edges_array_src[i] = sorted_edges_array->edges_array_src[i];\n#if WEIGHTED\n edgeList->edges_array_weight[i] = sorted_edges_array->edges_array_weight[i] ;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/preprocess/countsort.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "weight[i];\n#endif\n\n vertex_count[(t_id * num_vertices) + key]++;\n\n }\n\n }\n\n\n for(i = 0; i < num_edges; i++)\n {\n edgeList->edges_array_dest[i] = sorted_edges_array->edges_array_dest[i];\n edgeList->edges_array_src[i] = sorted_edges_array->edges_array_src[i];\n#if WEIGHTED\n edgeList->edges_array_weight[i] = sorted_edges_array->edges_array_weight[i] ;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for DFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n 
stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "cessed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "essed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "essed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": 
"nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)", "context_chars": 100, "text": "istance = UINT_MAX / 2;\n uint32_t maxDistance = 0;\n uint32_t numberOfDiscoverNodes = 0;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n\n if(stats->distances[v] != UINT_MAX / 2)\n {\n\n numberOfDiscoverNodes++;\n\n if(minDistance > stats->distances[v] && stats->distances[v] != 0)\n minDistance = stats->distances[v];\n\n if(maxDistance < stats->distances[v])\n maxDistance = stats->distances[v];\n\n\n }\n\n } #pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n // #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n }\n } #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)", "context_chars": 100, "text": "rtitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n // if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n // else\n // activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n }\n } #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n // if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n // else\n // activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(e,src,dest) shared(graph) reduction(+:edgesPlusCounter,edgesMinusCounter)", "context_chars": 100, "text": " = 0;\n uint32_t edgesMinusCounter = 0;\n uint32_t e;\n uint32_t src;\n uint32_t dest;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n src = graph->sorted_edges_array->edges_array_src[e];\n dest = graph->sorted_edges_array->edges_array_dest[e];\n if(src <= dest)\n {\n edgesPlusCounter++;\n }\n else if (src > dest)\n {\n edgesMinusCounter++;\n }\n } #pragma omp parallel for private(e,src,dest) shared(graph) reduction(+:edgesPlusCounter,edgesMinusCounter)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(e,src,dest) 
shared(edgesMinus_idx,edgesPlus_idx, edgesPlus,edgesMinus,graph)", "context_chars": 100, "text": "tices = graph->num_vertices;\n\n uint32_t edgesPlus_idx = 0;\n uint32_t edgesMinus_idx = 0;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n uint32_t localEdgesPlus_idx = 0;\n uint32_t localEdgesMinus_idx = 0;\n\n src = graph->sorted_edges_array->edges_array_src[e];\n dest = graph->sorted_edges_array->edges_array_dest[e];\n if(src <= dest)\n {\n localEdgesPlus_idx = __sync_fetch_and_add(&edgesPlus_idx, 1);\n\n edgesPlus->edges_array_src[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_src[e];\n edgesPlus->edges_array_dest[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesPlus->edges_array_weight[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_weight[e];\n\n }\n else if (src > dest)\n {\n localEdgesMinus_idx = __sync_fetch_and_add(&edgesMinus_idx, 1);\n\n edgesMinus->edges_array_src[localEdgesMinus_idx] = graph->sorted_edges_array->edges_array_src[e];\n edgesMinus->edges_array_dest[localEdgesMinus_idx] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesMinus->edges_array_weight[localEdgesMinus_idx] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n }\n } #pragma omp parallel for private(e,src,dest) shared(edgesMinus_idx,edgesPlus_idx, edgesPlus,edgesMinus,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(vertices,sorted_edges_array,graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "imer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t minParent = UINT_MAX;\n uint32_t degree;\n uint32_t j, u, w;\n uint32_t edge_idx;\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n // printf(\"degree %u arguments->source %u \\n\",degree,v );\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array->edges_array_dest[j]);\n w = 1;\n#if WEIGHTED\n w = sorted_edges_array->edges_array_weight[j];\n\n\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n minParent = u;\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n stats->parents[v] = minParent;\n\n degree = graph->vertices->out_degree[v];\n edge_idx = graph->vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(v) shared(vertices,sorted_edges_array,graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "imer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, 
v))\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "----------------------------------------\\n\");\n\n\n\n\n Start(timer);\n\n\n\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vertices[v] = v;\n degrees[v] = graph->vertices->out_degree[v];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v,n) shared(vertices,graphPlus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "imer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n for(n = 0; n < graphPlus->num_vertices; n++)\n {\n\n v = vertices[n];\n\n if(getBit(bitmapCurr, v))\n {\n\n uint32_t degree = graphPlus->vertices->out_degree[v];\n uint32_t edge_idx = graphPlus->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n\n uint32_t src = EXTRACT_VALUE(graphPlus->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphPlus->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphPlus->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v,n) shared(vertices,graphPlus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v,n) shared(vertices,graphMinus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "omicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n }\n\n for(n = 0; n < graphMinus->num_vertices; n++)\n {\n\n v = vertices[n];\n\n if(getBit(bitmapCurr, v))\n {\n\n uint32_t degree = graphMinus->vertices->out_degree[v];\n uint32_t edge_idx = graphMinus->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n\n uint32_t src = EXTRACT_VALUE(graphMinus->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphMinus->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = 
graphMinus->sorted_edges_array->edges_array_weight[j];\n\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v,n) shared(vertices,graphMinus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "imer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t degree;\n uint32_t j, u, w;\n\n uint32_t minParent = UINT_MAX;\n\n if(getBit(bitmapCurr, v))\n {\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n // printf(\"degree %u arguments->source %u \\n\",degree,v );\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->edges_array_dest[j];\n w = 1;\n#if WEIGHTED\n w = nodes->edges_array_weight[j];\n\n // printf(\"w %u \\n\",w );\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n minParent = u;\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n // stats->distances[v] = minDistance;\n stats->parents[v] = minParent;\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->edges_array_dest[j];\n w = 1;\n#if WEIGHTED\n w = nodes->edges_array_weight[j];\n\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = graph->vertices[v].out_degree;\n nodes = graph->vertices[v].outNodes;\n uint32_t j;\n for(j = 0 ; j < (degree) ; j++)\n {\n\n uint32_t src = nodes->edges_array_src[j];\n uint32_t dest = nodes->edges_array_dest[j];\n float weight = 1;\n#if WEIGHTED\n weight = nodes->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n\n }\n } #pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t degree;\n uint32_t j, u, w;\n\n\n if(getBit(bitmapCurr, v))\n {\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->dest;\n w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n stats->parents[v] = minDistance;\n\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->dest;\n w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = graph->vertices[v].out_degree;\n nodes = graph->vertices[v].outNodes;\n uint32_t j;\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = nodes->dest;\n uint32_t w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(v, u, w, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(v, u, w, stats, bitmapNext);\n }\n\n }\n } #pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) 
my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) schedule(dynamic, 128)", "context_chars": 100, "text": ");\n struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));\n\n Start(timer);\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter == node_w)\n // #pragma omp atomic update\n stats->counts[u]++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)", "context_chars": 100, "text": " }\n }\n }\n }\n\n Stop(timer);\n stats->time_total = Seconds(timer);\n\n for(u = 0; u < stats->num_vertices; u++)\n {\n counts += stats->counts[u];\n } #pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)", "context_chars": 100, "text": ");\n struct Timer *timer = (struct 
Timer *) malloc(sizeof(struct Timer));\n\n Start(timer);\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n steps++;\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n\n if(node_v > u)\n break;\n\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n if(node_w > node_v)\n break;\n\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter >= node_w)\n break;\n }\n\n\n if(node_w == node_iter)\n {\n counts++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) schedule(dynamic, 128)", "context_chars": 100, "text": ");\n struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));\n\n Start(timer);\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n\n if(node_v > u)\n break;\n\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n\n if(node_w > node_v)\n break;\n\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter >= node_w)\n break;\n }\n\n if(node_w == node_iter)\n {\n #pragma omp atomic update\n stats->counts[node_w]++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)", "context_chars": 100, "text": " }\n }\n }\n }\n\n Stop(timer);\n stats->time_total = Seconds(timer);\n\n for(u = 0; u < stats->num_vertices; u++)\n {\n counts += stats->counts[u];\n } #pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)", "context_chars": 100, "text": ");\n struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));\n\n Start(timer);\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n steps++;\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n\n if(node_v > u)\n break;\n counts += countIntersectionsBinarySearch(u, node_v, graph);\n }\n } #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": ">iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&stats->vector_output[dest],(weight * stats->vector_input[src]));\n // addAtomicFloat(&stats->vector_output[dest], (weight * stats->vector_input[src])\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&stats->vector_output[dest],(weight * stats->vector_input[src]));\n // addAtomicFloat(&stats->vector_output[dest], (weight * stats->vector_input[src])\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp 
parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = 
DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": ">iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n 
{\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n 
stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src ;\n uint32_t dest = v;\n float weight = 0.0001f;\n degree = vertices->out_degree[dest];\n edge_idx = vertices->edges_idx[dest];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n src = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight[j];\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n degree = vertices->out_degree[src];\n edge_idx = vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight[j];\n\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 
1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "WEIGHTED\n edges_array_weight = graph->sorted_edges_array->edges_array_weight;\n#endif\n#endif\n\n for (w = 0; w < graph->num_edges ; ++w)\n {\n#if WEIGHTED\n edges_array_weight_fixedPoint[w] = FloatToFixed32(edges_array_weight[w]);\n#else\n edges_array_weight_fixedPoint[w] = FloatToFixed32(0.0001f);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "h->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = FloatToFixed32(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = FloatToFixed32(0.0001f);\n degree = vertices->out_degree[dest];\n edge_idx = vertices->edges_idx[dest];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n src = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight_fixedPoint[j];\n\n vector_output[dest] += MULFixed32V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed32ToFloat(vector_output[v]);\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "ertices; v++)\n {\n stats->vector_output[v] = Fixed32ToFloat(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "aph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n degree = vertices->out_degree[src];\n edge_idx = vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = DoubleToFixed64(edges_array_weight[j]);\n\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": 
"---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = 0.0001f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = Nodes->edges_array_weight[j];\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = Nodes->edges_array_weight[j];\n\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, 
"text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->edges_array_dest[j];\n\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->edges_array_weight[j]);\n\n\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "tices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": 
"---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->edges_array_weight[j]);\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = 0.0001f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->dest;\n#if WEIGHTED\n weight = Nodes->weight;\n\n Nodes = Nodes->next;\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n\n dest = Nodes->dest;\n#if WEIGHTED\n weight = Nodes->weight;\n\n Nodes = Nodes->next;\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / 
graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n uint64_t weight = DoubleToFixed64(0.0001f);\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->dest;\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->weight);\n\n Nodes = Nodes->next;\n\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = 
DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->dest;\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->weight);\n\n Nodes = Nodes->next;\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp 
parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": ";\n\n }\n\n}\n\n\nvoid compressNodes(uint32_t num_vertices, uint32_t *components)\n{\n uint32_t n;\n for (n = 0; n < num_vertices; n++)\n {\n while (components[n] != components[components[n]])\n {\n components[n] = components[components[n]];\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n degree = graph->vertices->out_degree[src];\n edge_idx = graph->vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out = graph->vertices->out_degree[u];\n uint32_t edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + r) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out;\n uint32_t degree_in;\n uint32_t edge_idx_out;\n uint32_t edge_idx_in;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n degree_out = graph->vertices->out_degree[u];\n edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + stats->neighbor_rounds) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n degree_in = graph->inverse_vertices->out_degree[u];\n edge_idx_in = graph->inverse_vertices->edges_idx[u];\n\n for(j = (edge_idx_in) ; j < (edge_idx_in + degree_in) ; j++)\n {\n v = EXTRACT_VALUE(graph->inverse_sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "y->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out;\n uint32_t edge_idx_out;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n degree_out = graph->vertices->out_degree[u];\n edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + stats->neighbor_rounds) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n degree = graph->vertices->out_degree[src];\n edge_idx = graph->vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src != comp_dest)\n {\n uint32_t comp_high = comp_src > comp_dest ? comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src != comp_dest)\n {\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,neighbor)", "context_chars": 100, "text": ");\n struct Bitmap *linked = newBitmap(graph->num_vertices);\n\n stats->neighbor_rounds = 2;\n for(v = 0; v < graph->num_vertices; v++)\n {\n neighbor[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,neighbor)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(neighbor[src] >= r && !getBit(linked, src))\n {\n linkNodes(src, dest, stats->components);\n setBit(linked, src);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(neighbor[src] >= r && !getBit(linked, src))\n {\n linkNodes(src, dest, stats->components);\n setBit(linked, src);\n }\n else\n {\n neighbor[src]++;\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,neighbor)", "context_chars": 100, "text": " Stop(timer_inner);\n printf(\"| %-21u | %-27f | \\n\", r, Seconds(timer_inner));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n neighbor[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats,neighbor)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", 
"omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n\n }\n\n if(stats->components[dest] != sampleComp)\n {\n linkNodes(dest, src, stats->components);\n }\n\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n\n }\n\n if(stats->components[dest] != sampleComp)\n {\n linkNodes(dest, src, stats->components);\n }\n\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "linkNodes(dest, src, stats->components);\n }\n\n }\n }\n }\n#else\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule 
(dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n struct EdgeList *Nodes = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = (0 + r) ; j < (degree_out) ; j++)\n {\n v = Nodes->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct EdgeList *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n struct EdgeList *Nodes_in = graph->vertices[u].inNodes;\n uint32_t degree_in = graph->vertices[u].in_degree;\n\n for(j = (0) ; j < (degree_in) ; j++)\n {\n v = Nodes_in->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "in->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct EdgeList *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->edges_array_dest[j];\n linkNodes(u, v, 
stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n struct EdgeList *Nodes_out = graph->vertices[v].outNodes;\n uint32_t degree_out = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree_out) ; j++)\n {\n dest = Nodes_out->edges_array_dest[j];\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n\n dest = Nodes->dest;\n Nodes = Nodes->next;\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n struct AdjLinkedListNode *Nodes = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = (0 + r) ; j < (degree_out) ; j++)\n {\n v = Nodes->dest;\n Nodes = Nodes->next;\n\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n linkNodes(u, v, stats->components);\n }\n\n struct AdjLinkedListNode *Nodes_in = graph->vertices[u].inNodes;\n uint32_t degree_in = graph->vertices[u].in_degree;\n\n for(j = (0) ; j < (degree_in) ; j++)\n {\n v = Nodes_in->dest;\n Nodes_in = Nodes_in->next;\n\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "es_in = Nodes_in->next;\n\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n\n linkNodes(u, v, stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[v].outNodes;\n uint32_t degree_out = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree_out) ; j++)\n {\n dest = 
Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = UINT32_MAX;\n stats->dependency[vertex_id] = 0.0f;\n stats->betweennessCentrality[vertex_id] = 0.0f;\n stats->sigma[vertex_id] = 0;\n stats->realRanks[vertex_id] = vertex_id;\n stats->stack->nodes[vertex_id] = 0;\n if(graph->vertices->out_degree[vertex_id])\n stats->parents[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < stats->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = UINT32_MAX;\n stats->dependency[vertex_id] = 0.0f;\n stats->sigma[vertex_id] = 0;\n stats->stack->nodes[vertex_id] = 0;\n stats->predecessors[vertex_id].degree = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": " graph->vertices;\n sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->distances[v] == UINT32_MAX) // optmization\n {\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n\n if(getBit(bitmapCurr, u))\n {\n // stats->parents[v] = u;\n stats->distances[v] = stats->distances[u] + 1;\n\n if(stats->distances[v] == stats->distances[u] + 1)\n {\n\n stats->sigma[v] += 
stats->sigma[u];\n stats->predecessors[v].nodes[stats->predecessors[v].degree] = u;\n stats->predecessors[v].degree++;\n }\n\n setBitAtomic(bitmapNext, v);\n nf++;\n // break;\n }\n }\n }\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n // stats->parents_DualOrder[vertex_id] = 0;\n if(graph->vertices->out_degree[vertex_id])\n {\n stats->parents[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n stats->parents_DualOrder[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n }\n else\n {\n stats->parents[vertex_id] = -1;\n stats->parents_DualOrder[vertex_id] = -1;\n }\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "s->iteration = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n if(graph->vertices[vertex_id].out_degree)\n stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n if(graph->vertices[vertex_id].out_degree)\n stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id,vertex_v) 
shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)", "context_chars": 100, "text": "int32_t vertex_v;\n int *parents_temp;\n uint32_t num_threads_max = omp_get_max_threads();\n\n for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)\n {\n vertex_v = labels[vertex_id];\n // vertex_u = inv_labels[vertex_id];\n\n if((*parents)[vertex_id] >= 0)\n {\n (*parents_DualOrder)[vertex_v] = labels[(*parents)[vertex_id]];\n }\n else\n {\n (*parents_DualOrder)[vertex_v] = (*parents)[vertex_id];\n }\n\n } #pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)", "context_chars": 100, "text": " vertex_u;\n uint32_t *distances_temp;\n uint32_t num_threads_max = omp_get_max_threads();\n\n for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)\n {\n vertex_v = labels[vertex_id];\n // vertex_u = inv_labels[vertex_id];\n distances_DualOrder[vertex_v] = distances[vertex_id];\n } #pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "graph->vertices;\n sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->parents[v] < 0) // optmization\n {\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n //we are not considering distance array as it is not implemented in AccelGraph\n stats->distances[v] = stats->distances[u] + 1;\n setBitAtomic(bitmapNext, v);\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": " graph->vertices;\n sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->parents[v] < 0) // optmization\n {\n\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n //we are not considering distance array as it is not implemented in 
AccelGraph\n stats->distances[v] = stats->distances[u] + 1;\n setBitAtomic(bitmapNext, v);\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));\n\n\n uint32_t i;\n for(i = 0 ; i < P ; i++)\n {\n localFrontierQueues[i] = newArrayQueue(graph->num_vertices);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n for(i = 0 ; i < P ; i++)\n {\n freeArrayQueue(localFrontierQueues[i]);\n }\n\n // // for(i=0 ; i < P*P ; i++){\n // freeArrayQueue(localFrontierQueuesL2[i]);\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));\n\n\n uint32_t i;\n for(i = 0 ; i < P ; i++)\n {\n localFrontierQueues[i] = newArrayQueue(graph->num_vertices);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n for(i = 0 ; i < P ; i++)\n {\n freeArrayQueue(localFrontierQueues[i]);\n }\n\n // // for(i=0 ; i < P*P ; i++){\n // freeArrayQueue(localFrontierQueuesL2[i]);\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)", "context_chars": 100, "text": "xP\n\n\n\n\n uint32_t i;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t t_id = omp_get_thread_num();\n // uint32_t A = 0;\n struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];\n\n\n if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))\n {\n // #pragma omp task untied\n // {\n\n breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);\n flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);\n // }\n\n }\n } #pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(i,v) schedule(dynamic,1024)", "context_chars": 100, "text": "aphGridResetActivePartitions(graph->grid);\n graphGridResetActivePartitionsMap(graph->grid);\n\n for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)\n {\n v = 
sharedFrontierQueue->queue[i];\n // graphGridSetActivePartitions(graph->grid, v);\n // if(getBit(graph->grid->activePartitionsMap,i))\n graphGridSetActivePartitionsMap(graph->grid, v);\n } #pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(i,v) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(i,stats,totalPartitions,FrontierBitmapCurr ,FrontierBitmapNext, graph)", "context_chars": 100, "text": "// PxP\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n\n if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j) && graph->grid->partitions[(i * totalPartitions) + j].num_edges)\n {\n breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), FrontierBitmapCurr, FrontierBitmapNext, stats);\n }\n } #pragma omp parallel for default(none) shared(i,stats,totalPartitions,FrontierBitmapCurr ,FrontierBitmapNext, graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(i) schedule(dynamic,1024)", "context_chars": 100, "text": "itmap *FrontierBitmap)\n{\n\n uint32_t i;\n\n graphGridResetActivePartitionsMap(graph->grid);\n\n for(i = 0 ; i < FrontierBitmap->size; i++)\n {\n if(getBit(FrontierBitmap, i))\n graphGridSetActivePartitionsMap(graph->grid, i);\n } #pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(i) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "stats->processed_nodes += processed_nodes;\n\n\n uint32_t degree;\n struct EdgeList *Nodes;\n\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n if(stats->parents[v] < 0) // optmization\n {\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n setBitAtomic(bitmapNext, v);\n stats->distances[v] = stats->distances[u] + 1;\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "ocessed_nodes += processed_nodes;\n\n\n uint32_t degree;\n struct AdjLinkedListNode *Nodes;\n\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n if(stats->parents[v] < 0) // optmization\n {\n\n#if DIRECTED // will look at the 
other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n setBitAtomic(bitmapNext, v);\n stats->distances[v] = stats->distances[u] + 1;\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(uint32_t));\n stats->buckets_map = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)", "context_chars": 100, "text": "istance = UINT_MAX / 2;\n uint32_t maxDistance = 0;\n uint32_t numberOfDiscoverNodes = 0;\n\n\n for(v = 0; v < 
stats->num_vertices; v++)\n {\n\n if(stats->distances[v] != UINT_MAX / 2)\n {\n\n numberOfDiscoverNodes++;\n\n if(minDistance > stats->distances[v] && stats->distances[v] != 0)\n minDistance = stats->distances[v];\n\n if(maxDistance < stats->distances[v])\n maxDistance = stats->distances[v];\n\n\n }\n\n } #pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(e,weight) shared(graph,delta) reduction(+:edgesPlusCounter,edgesMinusCounter)", "context_chars": 100, "text": "MinusCounter = 0;\n uint32_t e;\n float weight;\n // uint32_t src;\n // uint32_t dest;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n // src = graph->sorted_edges_array[e].src;\n // dest = graph->sorted_edges_array[e].dest;\n\n\n weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n if(weight > delta)\n {\n edgesPlusCounter++;\n\n }\n else if (weight <= delta)\n {\n edgesMinusCounter++;\n\n }\n } #pragma omp parallel for private(e,weight) shared(graph,delta) reduction(+:edgesPlusCounter,edgesMinusCounter)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(e,weight) shared(edgesMinus_idx,edgesPlus_idx,edgesPlus,edgesMinus,graph)", "context_chars": 100, "text": "t(edgesMinusCounter);\n#endif\n\n uint32_t edgesPlus_idx = 0;\n uint32_t edgesMinus_idx = 0;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[e];\n\n uint32_t index = 0;\n\n if(weight > delta)\n {\n index = __sync_fetch_and_add(&edgesPlus_idx, 1);\n\n edgesPlus->edges_array_dest[index] = graph->sorted_edges_array->edges_array_dest[e];\n edgesPlus->edges_array_src[index] = graph->sorted_edges_array->edges_array_src[e];\n#if WEIGHTED\n edgesPlus->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n#if DIRECTED\n edgesPlusInverse->edges_array_dest[index] = graph->sorted_edges_array->edges_array_src[e];\n edgesPlusInverse->edges_array_src[index] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesPlusInverse->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n }\n else if (weight <= delta)\n {\n index = __sync_fetch_and_add(&edgesMinus_idx, 1);\n\n edgesMinus->edges_array_dest[index] = graph->sorted_edges_array->edges_array_dest[e];\n edgesMinus->edges_array_src[index] = graph->sorted_edges_array->edges_array_src[e];\n#if WEIGHTED\n edgesMinus->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n#if DIRECTED\n edgesMinusInverse->edges_array_dest[index] = graph->sorted_edges_array->edges_array_src[e];\n edgesMinusInverse->edges_array_src[index] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesMinusInverse->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n }\n } #pragma omp parallel for private(e,weight) shared(edgesMinus_idx,edgesPlus_idx,edgesPlus,edgesMinus,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) 
shared(bitmapSetCurr, graph, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": "= 0;\n // uint32_t buckets_total_local =\n // process light edges\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(__sync_bool_compare_and_swap(&(stats->buckets_map[v]), stats->bucket_current, (UINT_MAX / 2)))\n {\n // if(stats->buckets_map[v] == stats->bucket_current) {\n\n // pop vertex from bucket list\n setBitAtomic(bitmapSetCurr, v);\n\n #pragma omp atomic update\n stats->buckets_total--;\n\n // stats->buckets_map[v] = UINT_MAX/2;\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n\n }\n }\n\n Stop(timer_inner);\n\n if(activeVertices)\n printf(\"| L%-14u | %-15u | %-15f |\\n\", iter, stats->buckets_total, Seconds(timer_inner));\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graph, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(bitmapSetCurr, graphLight, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": "r = 0;\n // uint32_t buckets_total_local =\n // process light edges\n for(v = 0; v < graphLight->num_vertices; v++)\n {\n\n if(__sync_bool_compare_and_swap(&(stats->buckets_map[v]), stats->bucket_current, (UINT_MAX / 2)))\n {\n // if(stats->buckets_map[v] == stats->bucket_current) {\n\n // pop vertex from bucket list\n setBitAtomic(bitmapSetCurr, v);\n\n #pragma omp atomic update\n stats->buckets_total--;\n\n // stats->buckets_map[v] = UINT_MAX/2;\n\n uint32_t degree = graphLight->vertices->out_degree[v];\n uint32_t edge_idx = graphLight->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graphLight->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphLight->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphLight->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n\n }\n }\n\n Stop(timer_inner);\n\n if(activeVertices)\n printf(\"| L%-14u | %-15u | %-15f |\\n\", iter, stats->buckets_total, Seconds(timer_inner));\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graphLight, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(bitmapSetCurr, graphHeavy, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": " iter, stats->buckets_total, Seconds(timer_inner));\n }\n\n Start(timer_inner);\n\n for(v = 0; v < graphHeavy->num_vertices; v++)\n {\n if(getBit(bitmapSetCurr, v))\n {\n\n uint32_t degree = 
graphHeavy->vertices->out_degree[v];\n uint32_t edge_idx = graphHeavy->vertices->edges_idx[v];\n uint32_t j;\n\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graphHeavy->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphHeavy->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphHeavy->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n }\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graphHeavy, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "amEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]);\n // addAtomicFloat(float *num, float value)\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]);\n // addAtomicFloat(float *num, float value)\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "geRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma 
omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "amEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "geRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n 
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "treamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t i;\n\n // #pragma omp parallel for private(i) // iterate over partitions columnwise\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i) ", "context_chars": 100, "text": "(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t i;\n\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = 
partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n } #pragma omp parallel for private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "reamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t j;\n\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions columnwise\n {\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic 
update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) 
num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ">out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n }\n } #pragma omp parallel for default(none) private(v) 
shared(stats,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "pdate\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ">out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma 
omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n }\n\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed32(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ">out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "y[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n }\n\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed32ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed16(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ">out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "y[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed16ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = 
FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed8(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ">out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "y[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed8ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma 
omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n // pageRanksFP[v]=stats->base_prFP;\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n {\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n // riDividedOnDiClause[v] = DIVFixed64V1(pageRanksFP[v],UInt64ToFixed(graph->vertices[v].out_degree));\n }\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": ";\n }\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n }\n } #pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "update\n pageRanksNext[u] += riDividedOnDiClause[v];\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] 
= nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. 
Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_32(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "v] = quantize_32(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n }\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "xt[v] = rDivD_params.scale * nodeIncomingPR;\n }\n\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n 
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_16(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "v] = quantize_16(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n }\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "xt[v] = rDivD_params.scale * nodeIncomingPR;\n }\n\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_8(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "[v] = quantize_8(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n }\n //nodeIncomingPR -= (degree * rDivD_params.zero);\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "xt[v] = rDivD_params.scale * nodeIncomingPR;\n }\n\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "ams.max,rDivD_params.scale,rDivD_params.zero);\n\n //2. Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "nt[v] = quantize(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic update\n pageRanksNext[u] += rDivD_params.scale * (riDividedOnDiClause_quant[v] - rDivD_params.zero);\n }\n } #pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "params.scale * (riDividedOnDiClause_quant[v] - rDivD_params.zero);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n 
{\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n uint32_t edge_idx;\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n degree = graph->vertices->out_degree[v];\n edge_idx = graph->vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic write\n 
workListNext[u] = 1;\n\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(graph->vertices->out_degree[u])\n aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n degree = graph->vertices->out_degree[v];\n float delta = stats->damp * (aResiduals[v] / degree);\n edge_idx = graph->vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n #pragma omp atomic update\n aResiduals[u] += delta;\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n // #pragma omp atomic write\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] 
= 0.0f;\n }\n } #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(graph->vertices->out_degree[u])\n aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices->out_degree[u];\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n degree = graph->vertices->out_degree[v];\n float delta = stats->damp * (aResiduals[v] / degree);\n edge_idx = graph->vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= 
arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n aResiduals[u] += delta;\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float 
prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += 
riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : 
error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for 
private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n 
omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n\n #pragma omp atomic write\n workListNext[u] = 1;\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "---------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n\n // #pragma omp atomic write\n workListNext[u] = 1;\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) 
shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } 
#pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp 
parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->dest;\n Nodes = Nodes->next;\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * 
pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->dest;\n Nodes = Nodes->next;\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < 
graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n #pragma omp atomic write\n workListNext[u] = 1;\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n 
if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n\n // #pragma omp atomic write\n workListNext[u] = 1;\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma 
omp parallel for private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f(uint32_t));\n stats->dest = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vertices[v] = v;\n stats->degrees[v] = graph->vertices->out_degree[v];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f(uint32_t));\n stats->dest = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n for(v 
= 0; v < graph->num_vertices; v++)\n {\n stats->vertices[v] = v;\n stats->degrees[v] = graph->grid->out_degree[v];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f(uint32_t));\n stats->dest = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vertices[v] = v;\n stats->degrees[v] = graph->vertices[v].out_degree;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f(uint32_t));\n stats->dest = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vertices[v] = v;\n stats->degrees[v] = graph->vertices[v].out_degree;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dgesByDegree(stats->degrees, stats->vertices, graph->num_vertices);\n\n //initialize variables\n for(v = 0 ; v < graph->num_vertices; v++)\n {\n stats->atomDegree[v] = graph->vertices->out_degree[v];\n stats->atomChild[v] = UINT_MAX;\n stats->atom[v].pair.degree = graph->vertices->out_degree[v];\n stats->atom[v].pair.child = UINT_MAX;\n\n stats->sibling[v] = UINT_MAX;\n stats->dest[v] = v;\n stats->weightSum[v] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for private(degreeTemp,edgeTemp,tempV,k,tempU) shared (bitmapNC,reachableSet,stats)", "context_chars": 100, "text": "\n\n returnReachableSetOfNodesFromDendrogram(v, stats->atom, stats->sibling, reachableSet);\n // for(j = reachableSet->head ; j < reachableSet->tail; j++)\n {\n tempV = reachableSet->queue[j];\n\n degreeTemp = graph->vertices->out_degree[tempV];\n edgeTemp = graph->vertices->edges_idx[tempV];\n\n for(k = edgeTemp ; k < (edgeTemp + degreeTemp) ; k++)\n {\n tempU = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[k]);\n\n while(stats->dest[stats->dest[tempU]] != stats->dest[tempU])\n {\n // #pragma omp atomic write\n stats->dest[tempU] = stats->dest[stats->dest[tempU]];\n }\n setBitAtomic(bitmapNC, tempU);\n // edgeWeightUV++;\n }\n } #pragma omp parallel for private(degreeTemp,edgeTemp,tempV,k,tempU) shared (bitmapNC,reachableSet,stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/algorithms/openmp/incrementalAggregation.c", "omp_pragma_line": "#pragma omp parallel for shared(Neighbors, graph, stats) reduction (+:edgeWeightUV)", "context_chars": 100, "text": "}\n setBitAtomic(bitmapNC, tempU);\n // edgeWeightUV++;\n }\n }\n\n // for(t = 0; t < graph->num_vertices ; t++)\n {\n if(getBit(bitmapNC, t))\n {\n if(!isEnArrayQueued(Neighbors, stats->dest[t]))\n {\n edgeWeightUV++;\n enArrayQueueWithBitmapAtomic(Neighbors, stats->dest[t]);\n }\n }\n } #pragma omp parallel for shared(Neighbors, graph, stats) reduction (+:edgeWeightUV)"} 
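The pageRank.c records above repeatedly pair two OpenMP idioms: a push-style scatter into a next-rank array guarded by "#pragma omp atomic update", and a final "reduction(+:sum)" loop that sums or normalizes the ranks. The following is a minimal, self-contained sketch of that pairing, not code from the repository; the toy graph (NUM_V, out_idx, out_dest) and the rank arrays are hypothetical stand-ins introduced only for the example.

/*
 * Sketch: push-style rank update over a CSR-like out-edge list.
 * The atomic protects concurrent "+=" into rank_next; the reduction
 * gives each thread a private partial sum. All names are hypothetical.
 */
#include <omp.h>
#include <stdio.h>

#define NUM_V 5

int main(void)
{
    /* tiny hand-built out-edge list: vertex v owns edges out_idx[v]..out_idx[v+1]-1 */
    int out_idx[NUM_V + 1] = {0, 2, 3, 4, 5, 6};
    int out_dest[6]        = {1, 2, 2, 3, 4, 0};
    double rank[NUM_V], rank_next[NUM_V] = {0.0};
    int v;

    for (v = 0; v < NUM_V; v++)
        rank[v] = 1.0 / NUM_V;

    /* push phase: each vertex scatters rank/out_degree to its neighbours;
       the dynamic schedule mirrors the schedule(dynamic,1024) in the records */
    #pragma omp parallel for schedule(dynamic, 1024)
    for (v = 0; v < NUM_V; v++)
    {
        int degree = out_idx[v + 1] - out_idx[v];
        double contrib = degree ? rank[v] / degree : 0.0;
        int j;
        for (j = out_idx[v]; j < out_idx[v + 1]; j++)
        {
            #pragma omp atomic update
            rank_next[out_dest[j]] += contrib;   /* concurrent writes need the atomic */
        }
    }

    /* normalization phase: the reduction avoids a race on sum */
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for (v = 0; v < NUM_V; v++)
        sum += rank_next[v];

    printf("sum of pushed ranks = %f\n", sum);
    return 0;
}

The design choice the records suggest is that neither loop needs locks: the per-element atomic handles the scatter writes, and the reduction clause handles the global accumulation.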
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/utils/graphStats.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ut = (uint32_t *) my_malloc(sizeof(uint32_t) * arguments->binSize);\n\n\n// uint32_t i = 0;\n// // for(i = 0 ; i < arguments->binSize; i++)\n// {\n// histogram_in[i] = 0;\n// histogram_out[i] = 0;\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/utils/graphStats.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "m, uint32_t binSize, uint32_t inout_degree)\n// {\n\n// uint32_t v;\n// uint32_t index;\n\n// // for(v = 0; v < graphStats->num_vertices; v++)\n// {\n\n// index = v / ((graphStats->num_vertices / binSize) + 1);\n\n// if(inout_degree == 1)\n// {\n// #pragma omp atomic update\n// histogram[index] += graphStats->vertices->in_degree[v];\n// }\n// else if(inout_degree == 2)\n// {\n// #pragma omp atomic update\n// histogram[index] += graphStats->vertices->out_degree[v];\n// }\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/utils/graphStats.c", "omp_pragma_line": "#pragma omp parallel for private(y) shared(SparseMatrix)", "context_chars": 100, "text": " *) my_malloc(sizeof(uint32_t) * binSize * binSize);\n\n\n// uint32_t x;\n// uint32_t y;\n// // for(x = 0; x < binSize; x++)\n// {\n// for(y = 0; y < binSize; y++)\n// {\n// SparseMatrix[(binSize * y) + x] = 0;\n// }\n// } #pragma omp parallel for private(y) shared(SparseMatrix)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraph/00_graph_bench/src/utils/graphStats.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " SparseMatrix[(binSize * y) + x] = 0;\n// }\n// }\n\n\n// uint32_t i;\n\n// // for(i = 0; i < graphStats->num_edges; i++)\n// {\n// uint32_t src;\n// uint32_t dest;\n// src = graphStats->sorted_edges_array->edges_array_src[i] / ((graphStats->num_vertices / binSize) + 1);\n// dest = graphStats->sorted_edges_array->edges_array_dest[i] / ((graphStats->num_vertices / binSize) + 1);\n\n// #pragma omp atomic update\n// SparseMatrix[(binSize * dest) + src]++;\n\n// } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/cache/cache.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "]++;\n cache->access_counter++;\n\n // if(cache->access_counter % 1000 == 0)\n // {\n // // for ( i = 0; i < cache->numVertices; ++i)\n // {\n // if(cache->vertices_base_reuse[i] != 0)\n // cache->vertices_base_reuse[i] = cache->access_counter;\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions) private(i)", "context_chars": 100, "text": "itions = 0;\n totalPartitions = grid->num_partitions * grid->num_partitions;\n uint32_t i;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n grid->activePartitions[i] = 0;\n } #pragma omp parallel for default(none) shared(grid,totalPartitions) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", 
"omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)", "context_chars": 100, "text": " uint32_t i;\n uint32_t totalPartitions = 0;\n totalPartitions = grid->num_partitions;\n\n // for ( i = 0; i < totalPartitions; ++i)\n {\n\n Partition_idx = (row * totalPartitions) + i;\n\n if(grid->partitions[Partition_idx].edgeList->num_edges)\n {\n if(!getBit(grid->activePartitionsMap, Partition_idx))\n {\n setBitAtomic(grid->activePartitionsMap, Partition_idx);\n }\n }\n } #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)", "context_chars": 100, "text": " uint32_t i;\n uint32_t totalPartitions = 0;\n totalPartitions = grid->num_partitions;\n\n // for ( i = 0; i < totalPartitions; ++i)\n {\n\n Partition_idx = (row * totalPartitions) + i;\n if(grid->partitions[Partition_idx].edgeList->num_edges)\n {\n grid->activePartitions[Partition_idx] = 1;\n }\n } #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "id->num_vertices);\n grid->activePartitionsMap = newBitmap(totalPartitions);\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n\n grid->partitions[i].num_edges = 0;\n grid->partitions[i].num_vertices = 0; /* code */\n grid->activePartitions[i] = 0;\n\n\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(grid)", "context_chars": 100, "text": "->partitions[i].num_vertices = 0; /* code */\n grid->activePartitions[i] = 0;\n\n\n }\n\n\n for (i = 0; i < grid->num_vertices ; ++i)\n {\n\n grid->out_degree[i] = 0;\n grid->in_degree[i] = 0;\n\n } #pragma omp parallel for default(none) private(i) shared(grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,grid)", "context_chars": 100, "text": "Grid *grid, struct EdgeList *edgeList)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n #pragma omp atomic update\n grid->out_degree[src]++;\n\n #pragma omp atomic update\n grid->in_degree[dest]++;\n\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "m_vertices = 0;\n uint32_t totalPartitions = grid->num_partitions * grid->num_partitions;\n\n // #pragma omp parallel for default(none) 
private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)\n for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n }\n\n grid->partitions[j].num_vertices = num_vertices;\n grid->partitions[j].edgeList->num_vertices = num_vertices;\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)", "context_chars": 100, "text": "titions;\n\n // #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)\n for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n }\n\n grid->partitions[j].num_vertices = num_vertices;\n grid->partitions[j].edgeList->num_vertices = num_vertices;\n } #pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)", "context_chars": 100, "text": "dynamic,1024)\n for ( j = 0; j < totalPartitions; ++j)\n {\n num_vertices = 0;\n // for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++)\n {\n\n src = grid->partitions[j].edgeList->edges_array_src[i];\n dest = grid->partitions[j].edgeList->edges_array_dest[i];\n\n num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest));\n\n } #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)", "context_chars": 100, "text": "titions;\n uint32_t num_vertices = grid->num_vertices;\n\n\n uint32_t row;\n uint32_t col;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n // __sync_fetch_and_add(&grid->out_degree[src],1);\n // __sync_fetch_and_add(&grid->in_degree[dest],1);\n\n row = getPartitionID(num_vertices, num_partitions, src);\n col = getPartitionID(num_vertices, num_partitions, dest);\n Partition_idx = (row * num_partitions) + col;\n\n // __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges,1);\n\n 
#pragma omp atomic update\n grid->partitions[Partition_idx].num_edges++;\n\n } #pragma omp parallel for default(none) private(i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Edge_idx,i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)", "context_chars": 100, "text": "tions;\n uint32_t num_vertices = grid->num_vertices;\n\n uint32_t row;\n uint32_t col;\n\n\n\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n\n\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n row = getPartitionID(num_vertices, num_partitions, src);\n col = getPartitionID(num_vertices, num_partitions, dest);\n Partition_idx = (row * num_partitions) + col;\n\n Edge_idx = __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges, 1);\n\n grid->partitions[Partition_idx].edgeList->edges_array_src[Edge_idx] = edgeList->edges_array_src[i];\n grid->partitions[Partition_idx].edgeList->edges_array_dest[Edge_idx] = edgeList->edges_array_dest[i];\n#if WEIGHTED\n grid->partitions[Partition_idx].edgeList->edges_array_weight[Edge_idx] = edgeList->edges_array_weight[i];\n\n } #pragma omp parallel for default(none) private(Edge_idx,i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/structures/grid.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)", "context_chars": 100, "text": "{\n\n uint32_t i;\n uint32_t totalPartitions = grid->num_partitions * grid->num_partitions;\n\n for ( i = 0; i < totalPartitions; ++i)\n {\n\n grid->partitions[i].edgeList = newEdgeList(grid->partitions[i].num_edges);\n grid->partitions[i].edgeList->num_vertices = grid->partitions[i].num_vertices;\n grid->partitions[i].num_edges = 0;\n\n } #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "eof(uint32_t));\n labelsTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < num_vertices; v++)\n {\n pageRanksFP[v] = FloatToFixed32SORT(pageRanks[v]);\n pageRanksFPTemp[v] = 0;\n labelsTemp[v] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < edgeList->num_vertices; v++)\n {\n labelsInverse[v] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n\n labelsInverse = radixSortEdgesByDegree(degrees, labelsInverse, edgeList->num_vertices);\n\n for(v = 0; v < edgeList->num_vertices; v++)\n {\n labels[labelsInverse[v]] = edgeList->num_vertices - 1 - v;\n } #pragma omp parallel for"} 
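The grid.c records above show a two-pass bucketing idiom: per-partition edge counts are first accumulated with "#pragma omp atomic update", then slots in each partition are claimed with __sync_fetch_and_add so several threads can fill the same partition concurrently. Below is a minimal sketch of that idiom under hypothetical names (NUM_E, NUM_B, bucket_of) and a toy edge list; it is an illustration of the pattern, not code from the repository.

/*
 * Sketch: count-then-place bucketing. Pass 1 counts edges per bucket with
 * an OpenMP atomic; pass 2 claims a unique slot per edge via fetch-and-add.
 * A real edge list would also carry dest/weight arrays, placed the same way.
 */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_E 8
#define NUM_B 2

static uint32_t bucket_of(uint32_t src) { return src % NUM_B; }

int main(void)
{
    uint32_t src[NUM_E]   = {0, 1, 2, 3, 4, 5, 6, 7};
    uint32_t count[NUM_B] = {0};
    uint32_t i;

    /* pass 1: per-bucket edge counts; the atomic makes the increments safe */
    #pragma omp parallel for
    for (i = 0; i < NUM_E; i++)
    {
        #pragma omp atomic update
        count[bucket_of(src[i])]++;
    }

    /* allocate one array per bucket, then reset counters to reuse as fill cursors */
    uint32_t *bucket_src[NUM_B];
    uint32_t b;
    for (b = 0; b < NUM_B; b++)
    {
        bucket_src[b] = malloc(count[b] * sizeof(uint32_t));
        count[b] = 0;
    }

    /* pass 2: each thread claims a unique slot with a fetch-and-add */
    #pragma omp parallel for
    for (i = 0; i < NUM_E; i++)
    {
        uint32_t blk  = bucket_of(src[i]);
        uint32_t slot = __sync_fetch_and_add(&count[blk], 1);
        bucket_src[blk][slot] = src[i];
    }

    printf("bucket 0 holds %u edges, bucket 1 holds %u edges\n",
           (unsigned)count[0], (unsigned)count[1]);

    for (b = 0; b < NUM_B; b++)
        free(bucket_src[b]);
    return 0;
}

Resetting the count array between passes lets it double as the fill cursor, which is what the fetch-and-add increments in the second pass rely on.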
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * sizeof(uint32_t));\n thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n degrees[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ByDegree(degreesHot[num_buckets - 1], verticesHot[num_buckets - 1], sizeHot[num_buckets - 1]);\n\n for(v = 0; v < sizeHot[1]; v++)\n {\n labels[verticesHot[1][v]] = sizeHot[1] - 1 - v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0; v < sizeHot[1]; v++)\n {\n labels[verticesHot[1][v]] = sizeHot[1] - 1 - v;\n }\n\n for(v = 0; v < sizeHot[0]; v++)\n {\n labels[verticesHot[0][v]] = sizeHot[1] + (v);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" -----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " * sizeof(uint32_t));\n thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n degrees[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\", Seconds(timer));\n printf(\" 
-----------------------------------------------------\\n\");\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode)", "context_chars": 100, "text": "ct EdgeList *edgeList, uint32_t mmode)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n switch(mmode)\n {\n case 1 :\n case 3 :\n {\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n case 2 :\n case 4 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n }\n break;\n case 5 :\n case 6 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n default :\n {\n #pragma omp atomic update\n degrees[src]++;\n }// out-degree\n }\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ VERTEX_VALUE_LUKEWARM_U32\n }\n\n cache_regions[3] = UINT32_MAX; // VERTEX_CACHE_COLD_U32\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n mask_array[i] = VERTEX_CACHE_COLD_U32;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nds(timer));\n printf(\" -----------------------------------------------------\\n\");\n }\n\n for (i = 0; i < edgeList->num_vertices; ++i)\n {\n edgeList->mask_array[i] = mask_array[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode)", "context_chars": 100, "text": "lmode)\n{\n\n uint32_t i;\n uint32_t src;\n uint32_t dest;\n\n\n\n if(lmode != 10)\n {\n for(i = 0; i < edgeList->num_edges; i++)\n {\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n switch(lmode)\n {\n case 1 :\n case 4 :\n case 6 :\n case 8 :\n {\n #pragma omp atomic update\n degrees[src]++;\n } // degree\n break;\n case 2 :\n case 5 :\n case 7 :\n case 9 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n }\n break;\n case 3 :\n {\n #pragma omp atomic update\n degrees[dest]++;\n #pragma omp atomic update\n degrees[src]++;\n }\n break;\n default :\n {\n #pragma omp atomic update\n degrees[src]++;\n }// out-degree\n }\n } #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mt19937var)", "context_chars": 100, "text": "*) my_malloc(sizeof(mt19937state));\n initializeMersenneState (mt19937var, 27491095);\n for (i = 0; i < edgeList->num_vertices; 
++i)\n {\n degrees[i] = (generateRandInt(mt19937var) % edgeList->num_vertices) + omp_get_thread_num();\n } #pragma omp parallel for firstprivate(mt19937var)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ruct EdgeList *relabelEdgeList(struct EdgeList *edgeList, uint32_t *labels)\n{\n\n uint32_t i;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n uint32_t src;\n uint32_t dest;\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n edgeList->edges_array_src[i] = labels[src];\n edgeList->edges_array_dest[i] = labels[dest];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ruct EdgeList *maskEdgeList(struct EdgeList *edgeList, uint32_t *mask_array)\n{\n uint32_t i;\n\n for(i = 0; i < edgeList->num_edges; i++)\n {\n uint32_t src;\n uint32_t dest;\n src = edgeList->edges_array_src[i];\n dest = edgeList->edges_array_dest[i];\n\n edgeList->edges_array_src[i] = src | mask_array[src];\n edgeList->edges_array_dest[i] = dest | mask_array[dest];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/preprocess/reorder.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " break;\n\n }\n fclose(pText);\n\n\n\n edgeList = relabelEdgeList(edgeList, labels);\n\n for (v = 0; v < edgeList->num_vertices; ++v)\n {\n edgeList->label_array[v] = labels[edgeList->label_array[v]];\n edgeList->inverse_label_array[edgeList->label_array[v]] = v;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for DFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "cessed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "essed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n 
stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/DFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "essed_nodes = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt32_t));\n stats->parents = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)", "context_chars": 100, "text": "istance = UINT_MAX / 2;\n uint32_t maxDistance = 0;\n uint32_t numberOfDiscoverNodes = 0;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n\n if(stats->distances[v] != UINT_MAX / 2)\n {\n\n numberOfDiscoverNodes++;\n\n if(minDistance > stats->distances[v] && stats->distances[v] != 0)\n minDistance = stats->distances[v];\n\n if(maxDistance < stats->distances[v])\n maxDistance = stats->distances[v];\n\n\n }\n\n } #pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n // #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n }\n } #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)", "context_chars": 100, "text": "rtitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n // if(arguments->algo_numThreads == 1)\n activeVertices 
+= bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n // else\n // activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n }\n } #pragma omp parallel for private(j) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n float weight = 1;\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n if(getBit(bitmapCurr, src))\n {\n // if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n // else\n // activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(i) reduction(+ : activeVertices) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(e,src,dest) shared(graph) reduction(+:edgesPlusCounter,edgesMinusCounter)", "context_chars": 100, "text": " = 0;\n uint32_t edgesMinusCounter = 0;\n uint32_t e;\n uint32_t src;\n uint32_t dest;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n src = graph->sorted_edges_array->edges_array_src[e];\n dest = graph->sorted_edges_array->edges_array_dest[e];\n if(src <= dest)\n {\n edgesPlusCounter++;\n }\n else if (src > dest)\n {\n edgesMinusCounter++;\n }\n } #pragma omp parallel for private(e,src,dest) shared(graph) reduction(+:edgesPlusCounter,edgesMinusCounter)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(e,src,dest) shared(edgesMinus_idx,edgesPlus_idx, edgesPlus,edgesMinus,graph)", "context_chars": 100, "text": "tices = graph->num_vertices;\n\n uint32_t edgesPlus_idx = 0;\n uint32_t edgesMinus_idx = 0;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n uint32_t localEdgesPlus_idx = 0;\n uint32_t localEdgesMinus_idx = 0;\n\n src = graph->sorted_edges_array->edges_array_src[e];\n dest = graph->sorted_edges_array->edges_array_dest[e];\n if(src <= dest)\n {\n localEdgesPlus_idx = __sync_fetch_and_add(&edgesPlus_idx, 1);\n\n edgesPlus->edges_array_src[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_src[e];\n edgesPlus->edges_array_dest[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesPlus->edges_array_weight[localEdgesPlus_idx] = graph->sorted_edges_array->edges_array_weight[e];\n\n }\n else if (src > dest)\n {\n localEdgesMinus_idx = __sync_fetch_and_add(&edgesMinus_idx, 1);\n\n edgesMinus->edges_array_src[localEdgesMinus_idx] = graph->sorted_edges_array->edges_array_src[e];\n edgesMinus->edges_array_dest[localEdgesMinus_idx] = 
graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesMinus->edges_array_weight[localEdgesMinus_idx] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n }\n } #pragma omp parallel for private(e,src,dest) shared(edgesMinus_idx,edgesPlus_idx, edgesPlus,edgesMinus,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(vertices,sorted_edges_array,graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "ifdef SNIPER_HARNESS\n int iteration = iter;\n SimMarker(1, iteration);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t minParent = UINT_MAX;\n uint32_t degree;\n uint32_t j, u, w;\n uint32_t edge_idx;\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n // printf(\"degree %u arguments->source %u \\n\",degree,v );\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array->edges_array_dest[j]);\n w = 1;\n#if WEIGHTED\n w = sorted_edges_array->edges_array_weight[j];\n\n\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n minParent = u;\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n stats->parents[v] = minParent;\n\n degree = graph->vertices->out_degree[v];\n edge_idx = graph->vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(v) shared(vertices,sorted_edges_array,graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "ifdef SNIPER_HARNESS\n int iteration = iter;\n SimMarker(1, iteration);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, v))\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "----------------------------------------\\n\");\n\n\n\n\n 
Start(timer);\n\n\n\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vertices[v] = v;\n degrees[v] = graph->vertices->out_degree[v];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v,n) shared(vertices,graphPlus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "ifdef SNIPER_HARNESS\n int iteration = iter;\n SimMarker(1, iteration);\n#endif\n\n for(n = 0; n < graphPlus->num_vertices; n++)\n {\n\n v = vertices[n];\n\n if(getBit(bitmapCurr, v))\n {\n\n uint32_t degree = graphPlus->vertices->out_degree[v];\n uint32_t edge_idx = graphPlus->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n\n uint32_t src = EXTRACT_VALUE(graphPlus->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphPlus->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphPlus->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v,n) shared(vertices,graphPlus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(v,n) shared(vertices,graphMinus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n SimMarker(2, iteration);\n SimMarker(3, iteration);\n#endif\n\n for(n = 0; n < graphMinus->num_vertices; n++)\n {\n\n v = vertices[n];\n\n if(getBit(bitmapCurr, v))\n {\n\n uint32_t degree = graphMinus->vertices->out_degree[v];\n uint32_t edge_idx = graphMinus->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n\n uint32_t src = EXTRACT_VALUE(graphMinus->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphMinus->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphMinus->sorted_edges_array->edges_array_weight[j];\n\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n }\n } #pragma omp parallel for private(v,n) shared(vertices,graphMinus,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "imer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t degree;\n uint32_t j, u, w;\n\n uint32_t minParent = UINT_MAX;\n\n if(getBit(bitmapCurr, v))\n {\n#if DIRECTED // will 
look at the other neighbours if directed by using inverese edge list\n nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n // printf(\"degree %u arguments->source %u \\n\",degree,v );\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->edges_array_dest[j];\n w = 1;\n#if WEIGHTED\n w = nodes->edges_array_weight[j];\n\n // printf(\"w %u \\n\",w );\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n minParent = u;\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n // stats->distances[v] = minDistance;\n stats->parents[v] = minParent;\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->edges_array_dest[j];\n w = 1;\n#if WEIGHTED\n w = nodes->edges_array_weight[j];\n\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = graph->vertices[v].out_degree;\n nodes = graph->vertices[v].outNodes;\n uint32_t j;\n for(j = 0 ; j < (degree) ; j++)\n {\n\n uint32_t src = nodes->edges_array_src[j];\n uint32_t dest = nodes->edges_array_dest[j];\n float weight = 1;\n#if WEIGHTED\n weight = nodes->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(src, dest, weight, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(src, dest, weight, stats, bitmapNext);\n }\n\n }\n } #pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t minDistance = UINT_MAX / 2;\n uint32_t degree;\n uint32_t j, u, w;\n\n\n if(getBit(bitmapCurr, v))\n {\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->dest;\n w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if (minDistance > (stats->distances[u] + w))\n {\n minDistance = (stats->distances[u] + w);\n }\n }\n\n if(bellmanFordAtomicMin(&(stats->distances[v]), minDistance))\n {\n stats->parents[v] = minDistance;\n\n 
nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = nodes->dest;\n w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if(!getBit(bitmapNext, u))\n {\n activeVertices++;\n setBitAtomic(bitmapNext, u);\n }\n }\n }\n }\n } #pragma omp parallel for private(nodes,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/bellmanFord.c", "omp_pragma_line": "#pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)", "context_chars": 100, "text": "mer_inner);\n stats->processed_nodes += activeVertices;\n activeVertices = 0;\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(getBit(bitmapCurr, v))\n {\n\n degree = graph->vertices[v].out_degree;\n nodes = graph->vertices[v].outNodes;\n uint32_t j;\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = nodes->dest;\n uint32_t w = 1;\n#if WEIGHTED\n w = nodes->weight;\n\n nodes = nodes->next;\n\n if(arguments->algo_numThreads == 1)\n activeVertices += bellmanFordRelax(v, u, w, stats, bitmapNext);\n else\n activeVertices += bellmanFordAtomicRelax(v, u, w, stats, bitmapNext);\n }\n\n }\n } #pragma omp parallel for private(nodes,degree,v) shared(graph,stats,bitmapNext,bitmapCurr) reduction(+ : activeVertices) schedule (dynamic,128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "al = 0.0f;\n stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) 
shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) schedule(dynamic, 128)", "context_chars": 100, "text": "loc(sizeof(struct Timer));\n\n Start(timer);\n\n#ifdef SNIPER_HARNESS\n SimRoiStart();\n#endif\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter == node_w)\n // #pragma omp atomic update\n stats->counts[u]++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)", "context_chars": 100, "text": "IPER_HARNESS\n SimRoiEnd();\n#endif\n\n Stop(timer);\n stats->time_total = Seconds(timer);\n\n for(u = 0; u < stats->num_vertices; u++)\n {\n counts += stats->counts[u];\n } #pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)", "context_chars": 100, "text": "loc(sizeof(struct Timer));\n\n Start(timer);\n\n#ifdef SNIPER_HARNESS\n SimRoiStart();\n#endif\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n steps++;\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n\n if(node_v > u)\n break;\n\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n if(node_w > node_v)\n break;\n\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter 
>= node_w)\n break;\n }\n\n\n if(node_w == node_iter)\n {\n counts++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) schedule(dynamic, 128)", "context_chars": 100, "text": "loc(sizeof(struct Timer));\n\n Start(timer);\n\n#ifdef SNIPER_HARNESS\n SimRoiStart();\n#endif\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n\n if(node_v > u)\n break;\n\n uint32_t degree_v = graph->vertices->out_degree[node_v];\n uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];\n uint32_t w;\n\n uint32_t degree_iter = graph->vertices->out_degree[u];\n uint32_t edge_idx_iter = graph->vertices->edges_idx[u];\n uint32_t iter;\n\n for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)\n {\n\n uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);\n\n if(node_w > node_v)\n break;\n\n uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);\n\n for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)\n {\n node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);\n\n if(node_iter >= node_w)\n break;\n }\n\n if(node_w == node_iter)\n {\n #pragma omp atomic update\n stats->counts[node_w]++;\n }\n }\n }\n } #pragma omp parallel for shared(stats) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)", "context_chars": 100, "text": "IPER_HARNESS\n SimRoiEnd();\n#endif\n\n Stop(timer);\n stats->time_total = Seconds(timer);\n\n for(u = 0; u < stats->num_vertices; u++)\n {\n counts += stats->counts[u];\n } #pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/triangleCount.c", "omp_pragma_line": "#pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)", "context_chars": 100, "text": "loc(sizeof(struct Timer));\n\n Start(timer);\n\n#ifdef SNIPER_HARNESS\n SimRoiStart();\n#endif\n\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t degree_u = graph->vertices->out_degree[u];\n uint32_t edge_idx_u = graph->vertices->edges_idx[u];\n uint32_t v;\n\n steps++;\n for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)\n {\n uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);\n\n if(node_v > u)\n break;\n counts += countIntersectionsBinarySearch(u, node_v, graph);\n }\n } #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) 
my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "f(float));\n stats->vector_input = (float *) my_malloc(graph->num_vertices * sizeof(float));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->vector_output[v] = 0.0f;\n stats->vector_input[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": ">iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&stats->vector_output[dest],(weight * stats->vector_input[src]));\n // addAtomicFloat(&stats->vector_output[dest], (weight * 
stats->vector_input[src])\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&stats->vector_output[dest],(weight * stats->vector_input[src]));\n // addAtomicFloat(&stats->vector_output[dest], (weight * stats->vector_input[src])\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n }\n } #pragma omp parallel for private(j) schedule 
(dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n float weight = 0.0001f;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = partition->edgeList->edges_array_weight[k];\n\n\n // #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": ">iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n // #pragma omp atomic update\n vector_output[dest] += 
MULFixed64V1(weight, vector_input[src]);\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ graph->grid->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // #pragma omp 
parallel for private(i) schedule (dynamic,arguments->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "rtitions; ++j) // iterate over partitions colwise\n {\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n#if WEIGHTED\n weight = DoubleToFixed64(partition->edgeList->edges_array_weight[k]);\n\n\n // #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "def SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, 
iter);\n#endif\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src ;\n uint32_t dest = v;\n float weight = 0.0001f;\n degree = vertices->out_degree[dest];\n edge_idx = vertices->edges_idx[dest];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n src = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight[j];\n\n\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_input[src]), 'r', src, EXTRACT_MASK(sorted_edges_array[j]));\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_output[dest]), 'r', dest, graph->sorted_edges_array->mask_array[dest]);\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_output[dest]), 'w', dest, graph->sorted_edges_array->mask_array[dest]);\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "onds(timer_inner));\n\n }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n degree = vertices->out_degree[src];\n edge_idx = vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight[j];\n\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "nds(timer_inner));\n\n }// end iteration loop\n\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n setDoubleTaggedCacheThresholdDegreeAvg(stats->cache, graph->vertices->out_degree);\n#endif\n\n for (w = 0; w < graph->num_edges ; ++w)\n {\n#if WEIGHTED\n edges_array_weight_fixedPoint[w] = FloatToFixed32(edges_array_weight[w]);\n#else\n edges_array_weight_fixedPoint[w] = FloatToFixed32(0.0001f);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "h->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = FloatToFixed32(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "def SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = FloatToFixed32(0.0001f);\n degree = vertices->out_degree[dest];\n edge_idx = vertices->edges_idx[dest];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n src = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = edges_array_weight_fixedPoint[j];\n\n\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_input[src]), 'r', src, EXTRACT_MASK(sorted_edges_array[j]));\n\n vector_output[dest] += MULFixed32V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_output[dest]), 'r', dest, graph->sorted_edges_array->mask_array[dest]);\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->vector_output[dest]), 'w', dest, graph->sorted_edges_array->mask_array[dest]);\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "onds(timer_inner));\n\n }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed32ToFloat(vector_output[v]);\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "ertices; v++)\n {\n stats->vector_output[v] = Fixed32ToFloat(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n stats->vector_input[v] = (1.0f / graph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "aph->vertices->out_degree[v]);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n degree = vertices->out_degree[src];\n edge_idx = vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(sorted_edges_array[j]);\n#if WEIGHTED\n weight = DoubleToFixed64(edges_array_weight[j]);\n\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "onds(timer_inner));\n\n }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", 
"context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = 0.0001f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = Nodes->edges_array_weight[j];\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = Nodes->edges_array_weight[j];\n\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for 
reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->edges_array_dest[j];\n\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->edges_array_weight[j]);\n\n\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "tices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": 
"#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->edges_array_weight[j]);\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; 
stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n float weight = 0.0001f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->dest;\n#if WEIGHTED\n weight = Nodes->weight;\n\n Nodes = Nodes->next;\n\n stats->vector_output[dest] += (weight * stats->vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n float weight = 0.0001f;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n\n dest = Nodes->dest;\n#if WEIGHTED\n weight = Nodes->weight;\n\n Nodes = Nodes->next;\n\n #pragma omp atomic update\n stats->vector_output[dest] += (weight * stats->vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; 
v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src;\n uint32_t dest = v;\n uint64_t weight = DoubleToFixed64(0.0001f);\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[dest].inNodes;\n degree = graph->vertices[dest].in_degree;\n#else\n Nodes = graph->vertices[dest].outNodes;\n degree = graph->vertices[dest].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n src = Nodes->dest;\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->weight);\n\n Nodes = Nodes->next;\n\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]); // stats->pageRanks[v]/graph->vertices[v].out_degree;\n\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "---------------------------------\\n\");\n\n //assume any vector input for benchamrking purpose.\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n stats->vector_input[v] = (1.0f / graph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "raph->vertices[v].out_degree);\n else\n stats->vector_input[v] = 0.001f;\n 
}\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n vector_output[v] = 0;\n vector_input[v] = DoubleToFixed64(stats->vector_input[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "iterations < arguments->iterations; stats->iterations++)\n {\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n uint64_t weight = DoubleToFixed64(0.0001f);\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->dest;\n\n#if WEIGHTED\n weight = DoubleToFixed64(Nodes->weight);\n\n Nodes = Nodes->next;\n\n #pragma omp atomic update\n vector_output[dest] += MULFixed64V1(weight, vector_input[src]);\n }\n\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(\"| %-21u | %-27f | \\n\", stats->iterations, Seconds(timer_inner));\n\n }// end iteration loop\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SPMV.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "rtices; v++)\n {\n stats->vector_output[v] = Fixed64ToDouble(vector_output[v]);\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n sum += ((int)(stats->vector_output[v] * 100 + .5) / 100.0);\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < 
stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));\n stats->labels = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->components[v] = v;\n stats->labels[v] = v;\n stats->counts[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": ";\n\n }\n\n}\n\n\nvoid compressNodes(uint32_t num_vertices, uint32_t *components)\n{\n uint32_t n;\n for (n = 0; n < num_vertices; n++)\n {\n while (components[n] != components[components[n]])\n {\n components[n] = components[components[n]];\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n degree = graph->vertices->out_degree[src];\n edge_idx = graph->vertices->edges_idx[src];\n // #ifdef CACHE_HARNESS\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (graph->vertices->out_degree[src]), 'r', src, graph->vertices->out_degree[src]);\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (graph->vertices->edges_idx[src]), 'r', src, graph->vertices->edges_idx[src]);\n // \n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->components[src]), 'r', src, graph->sorted_edges_array->mask_array[src]);\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->components[dest]), 'r', dest, EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]));\n\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->components[comp_high]), 'r', comp_high, graph->sorted_edges_array->mask_array[comp_high]);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->components[comp_high]), 'w', comp_high, graph->sorted_edges_array->mask_array[comp_high]);\n\n }\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out = graph->vertices->out_degree[u];\n uint32_t edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + r) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out;\n uint32_t degree_in;\n uint32_t edge_idx_out;\n uint32_t edge_idx_in;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n degree_out = graph->vertices->out_degree[u];\n edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + stats->neighbor_rounds) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n degree_in = graph->inverse_vertices->out_degree[u];\n edge_idx_in = graph->inverse_vertices->edges_idx[u];\n\n for(j = (edge_idx_in) ; j < (edge_idx_in + degree_in) ; j++)\n {\n v = EXTRACT_VALUE(graph->inverse_sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "y->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n uint32_t degree_out;\n uint32_t edge_idx_out;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n degree_out = graph->vertices->out_degree[u];\n edge_idx_out = graph->vertices->edges_idx[u];\n\n for(j = (edge_idx_out + stats->neighbor_rounds) ; j < (edge_idx_out + degree_out) ; j++)\n {\n v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n linkNodes(u, v, stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n degree = graph->vertices->out_degree[src];\n edge_idx = graph->vertices->edges_idx[src];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v,degree,edge_idx) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src != comp_dest)\n {\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src != comp_dest)\n {\n uint32_t comp_high = comp_src > comp_dest ? comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,neighbor)", "context_chars": 100, "text": ");\n struct Bitmap *linked = newBitmap(graph->num_vertices);\n\n stats->neighbor_rounds = 2;\n for(v = 0; v < graph->num_vertices; v++)\n {\n neighbor[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,neighbor)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(neighbor[src] >= r && !getBit(linked, src))\n {\n linkNodes(src, dest, stats->components);\n setBit(linked, src);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n 
uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(neighbor[src] >= r && !getBit(linked, src))\n {\n linkNodes(src, dest, stats->components);\n setBit(linked, src);\n }\n else\n {\n neighbor[src]++;\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,neighbor)", "context_chars": 100, "text": " Stop(timer_inner);\n printf(\"| %-21u | %-27f | \\n\", r, Seconds(timer_inner));\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n neighbor[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(stats,neighbor)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n\n }\n\n if(stats->components[dest] != sampleComp)\n {\n linkNodes(dest, src, stats->components);\n }\n\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n\n }\n\n if(stats->components[dest] != sampleComp)\n {\n linkNodes(dest, src, stats->components);\n }\n\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", 
"omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "linkNodes(dest, src, stats->components);\n }\n\n }\n }\n }\n#else\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ts->algo_numThreads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(stats->components[src] != sampleComp)\n {\n\n if(neighbor[src] >= stats->neighbor_rounds)\n {\n linkNodes(src, dest, stats->components);\n }\n else\n {\n neighbor[src]++;\n }\n }\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": " Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n\n }\n }\n } #pragma omp parallel for private(i) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)", "context_chars": 100, "text": "ads)\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n // for (j = 0; j < totalPartitions; ++j) // 
iterate over partitions colwise\n {\n uint32_t k;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n\n uint32_t src = partition->edgeList->edges_array_src[k];\n uint32_t dest = partition->edgeList->edges_array_dest[k];\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n\n }\n } #pragma omp parallel for private(j) schedule (dynamic,arguments->algo_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n dest = Nodes->edges_array_dest[j];\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? 
comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n struct EdgeList *Nodes = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = (0 + r) ; j < (degree_out) ; j++)\n {\n v = Nodes->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct EdgeList *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n struct EdgeList *Nodes_in = graph->vertices[u].inNodes;\n uint32_t degree_in = graph->vertices[u].in_degree;\n\n for(j = (0) ; j < (degree_in) ; j++)\n {\n v = Nodes_in->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "in->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct EdgeList *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->edges_array_dest[j];\n linkNodes(u, v, stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n struct EdgeList *Nodes_out = graph->vertices[v].outNodes;\n uint32_t degree_out = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree_out) ; j++)\n {\n dest = Nodes_out->edges_array_dest[j];\n\n if(atomicMin(&(stats->components[dest]), 
stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n Nodes = graph->vertices[src].outNodes;\n degree = graph->vertices[src].out_degree;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n\n dest = Nodes->dest;\n Nodes = Nodes->next;\n\n uint32_t comp_src = stats->components[src];\n uint32_t comp_dest = stats->components[dest];\n\n if(comp_src == comp_dest)\n continue;\n\n uint32_t comp_high = comp_src > comp_dest ? comp_src : comp_dest;\n uint32_t comp_low = comp_src + (comp_dest - comp_high);\n\n if(comp_high == stats->components[comp_high])\n {\n change = 1;\n stats->components[comp_high] = comp_low;\n }\n }\n } #pragma omp parallel for private(v,degree,Nodes) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "t(timer);\n for(r = 0; r < stats->neighbor_rounds; r++)\n {\n Start(timer_inner);\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n struct AdjLinkedListNode *Nodes = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = (0 + r) ; j < (degree_out) ; j++)\n {\n v = Nodes->dest;\n Nodes = Nodes->next;\n\n linkNodes(u, v, stats->components);\n break;\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "-----------------------------------------------------\\n\");\n Start(timer_inner);\n#if DIRECTED\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n linkNodes(u, v, stats->components);\n }\n\n struct AdjLinkedListNode *Nodes_in = graph->vertices[u].inNodes;\n uint32_t degree_in = graph->vertices[u].in_degree;\n\n for(j = (0) ; j < (degree_in) ; 
j++)\n {\n v = Nodes_in->dest;\n Nodes_in = Nodes_in->next;\n\n linkNodes(u, v, stats->components);\n }\n\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic, 2048)", "context_chars": 100, "text": "es_in = Nodes_in->next;\n\n linkNodes(u, v, stats->components);\n }\n\n }\n#else\n for(u = 0; u < graph->num_vertices; u++)\n {\n uint32_t j;\n uint32_t v;\n\n if(stats->components[u] == sampleComp)\n continue;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[u].outNodes;\n uint32_t degree_out = graph->vertices[u].out_degree;\n\n for(j = ( 0 + stats->neighbor_rounds) ; j < (degree_out) ; j++)\n {\n v = Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n\n linkNodes(u, v, stats->components);\n }\n } #pragma omp parallel for schedule(dynamic, 2048)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for private(v) schedule(dynamic, 512)", "context_chars": 100, "text": "change)\n {\n Start(timer_inner);\n change = 0;\n stats->iterations++;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t j;\n uint32_t src = v;\n uint32_t dest;\n\n struct AdjLinkedListNode *Nodes_out = graph->vertices[v].outNodes;\n uint32_t degree_out = graph->vertices[v].out_degree;\n\n for(j = 0 ; j < (degree_out) ; j++)\n {\n dest = Nodes_out->dest;\n Nodes_out = Nodes_out->next;\n\n if(atomicMin(&(stats->components[dest]), stats->components[src]))\n {\n setBitAtomic(bitmapNext, dest);\n }\n\n if(atomicMin(&(stats->components[src]), stats->components[dest]))\n {\n setBitAtomic(bitmapNext, src);\n }\n }\n } #pragma omp parallel for private(v) schedule(dynamic, 512)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/connectedComponents.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:change)", "context_chars": 100, "text": " }\n }\n\n\n // compressNodes( stats->num_vertices, stats->components);\n\n for(v = 0 ; v < ((bitmapNext->size + kBitsPerWord - 1) / kBitsPerWord); v++)\n {\n change += bitmapNext->bitarray[v];\n bitmapNext->bitarray[v] = 0;\n } #pragma omp parallel for reduction (+:change)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = UINT32_MAX;\n stats->dependency[vertex_id] = 0.0f;\n stats->betweennessCentrality[vertex_id] = 0.0f;\n stats->sigma[vertex_id] = 0;\n stats->realRanks[vertex_id] = vertex_id;\n stats->stack->nodes[vertex_id] = 0;\n if(graph->vertices->out_degree[vertex_id])\n stats->parents[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < stats->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = UINT32_MAX;\n stats->dependency[vertex_id] = 0.0f;\n stats->sigma[vertex_id] = 0;\n stats->stack->nodes[vertex_id] = 0;\n stats->predecessors[vertex_id].degree = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/betweennessCentrality.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": " graph->vertices;\n sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->distances[v] == UINT32_MAX) // optmization\n {\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (bitmapCurr->bitarray[word_offset(u)]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n if(getBit(bitmapCurr, u))\n {\n // stats->parents[v] = u;\n stats->distances[v] = stats->distances[u] + 1;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->distances[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n if(stats->distances[v] == stats->distances[u] + 1)\n {\n\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->sigma[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->sigma[u]), 'w', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n stats->sigma[v] += stats->sigma[u];\n stats->predecessors[v].nodes[stats->predecessors[v].degree] = u;\n stats->predecessors[v].degree++;\n }\n\n setBitAtomic(bitmapNext, v);\n nf++;\n // break;\n }\n }\n }\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n // stats->parents_DualOrder[vertex_id] = 0;\n if(graph->vertices->out_degree[vertex_id])\n {\n stats->parents[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n stats->parents_DualOrder[vertex_id] = graph->vertices->out_degree[vertex_id] * (-1);\n }\n else\n {\n stats->parents[vertex_id] = -1;\n stats->parents_DualOrder[vertex_id] = -1;\n }\n } #pragma omp parallel for 
default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "s->iteration = 0;\n stats->num_vertices = graph->num_vertices;\n stats->time_total = 0.0f;\n\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n if(graph->vertices[vertex_id].out_degree)\n stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)", "context_chars": 100, "text": "ptimization for BFS implentaion instead of -1 we use -out degree to for hybrid approach counter\n for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)\n {\n stats->distances[vertex_id] = 0;\n if(graph->vertices[vertex_id].out_degree)\n stats->parents[vertex_id] = graph->vertices[vertex_id].out_degree * (-1);\n else\n stats->parents[vertex_id] = -1;\n } #pragma omp parallel for default(none) private(vertex_id) shared(stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)", "context_chars": 100, "text": "int32_t vertex_v;\n int *parents_temp;\n uint32_t num_threads_max = omp_get_max_threads();\n\n for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)\n {\n vertex_v = labels[vertex_id];\n // vertex_u = inv_labels[vertex_id];\n\n if((*parents)[vertex_id] >= 0)\n {\n (*parents_DualOrder)[vertex_v] = labels[(*parents)[vertex_id]];\n }\n else\n {\n (*parents_DualOrder)[vertex_v] = (*parents)[vertex_id];\n }\n\n } #pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)", "context_chars": 100, "text": " vertex_u;\n uint32_t *distances_temp;\n uint32_t num_threads_max = omp_get_max_threads();\n\n for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)\n {\n vertex_v = labels[vertex_id];\n // vertex_u 
= inv_labels[vertex_id];\n distances_DualOrder[vertex_v] = distances[vertex_id];\n } #pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024) num_threads(1)", "context_chars": 100, "text": " sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n#ifdef CACHE_HARNESS\n #else\n #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->parents[v] < 0) // optmization\n {\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (bitmapCurr->bitarray[word_offset(u)]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n //we are not considering distance array as it is not implemented in AccelGraph\n stats->distances[v] = stats->distances[u] + 1;\n setBitAtomic(bitmapNext, v);\n // #ifdef CACHE_HARNESS\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (stats->parents[v]), 'w', v, stats->parents[v]);\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (bitmapNext->bitarray[word_offset(v)]), 'w', v, (bitmapNext->bitarray[word_offset(v)]));\n // \n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024) num_threads(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024) num_threads(1)\n#else\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->parents[v] < 0) // optmization\n {\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (bitmapCurr->bitarray[word_offset(u)]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n //we are not considering distance array as it is not implemented in AccelGraph\n stats->distances[v] = stats->distances[u] + 1;\n setBitAtomic(bitmapNext, v);\n // #ifdef CACHE_HARNESS\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (stats->parents[v]), 'w', v, stats->parents[v]);\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (bitmapNext->bitarray[word_offset(v)]), 'w', v, 
(bitmapNext->bitarray[word_offset(v)]));\n // \n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": " graph->vertices;\n sorted_edges_array = graph->sorted_edges_array->edges_array_dest;\n#endif\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n out_degree = vertices->out_degree[v];\n if(stats->parents[v] < 0) // optmization\n {\n\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)\n {\n\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (bitmapCurr->bitarray[word_offset(u)]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n //we are not considering distance array as it is not implemented in AccelGraph\n stats->distances[v] = stats->distances[u] + 1;\n setBitAtomic(bitmapNext, v);\n // #ifdef CACHE_HARNESS\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (stats->parents[v]), 'w', v, stats->parents[v]);\n // AccessDoubleTaggedCacheFloat(stats->cache, (uint64_t) & (bitmapNext->bitarray[word_offset(v)]), 'w', v, (bitmapNext->bitarray[word_offset(v)]));\n // \n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));\n\n\n uint32_t i;\n for(i = 0 ; i < P ; i++)\n {\n localFrontierQueues[i] = newArrayQueue(graph->num_vertices);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n for(i = 0 ; i < P ; i++)\n {\n freeArrayQueue(localFrontierQueues[i]);\n }\n\n // // for(i=0 ; i < P*P ; i++){\n // freeArrayQueue(localFrontierQueuesL2[i]);\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));\n\n\n uint32_t i;\n for(i = 0 ; i < P ; i++)\n {\n localFrontierQueues[i] = newArrayQueue(graph->num_vertices);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n for(i = 0 ; i < P ; i++)\n {\n freeArrayQueue(localFrontierQueues[i]);\n }\n\n // // for(i=0 ; i < P*P ; 
i++){\n // freeArrayQueue(localFrontierQueuesL2[i]);\n // } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)", "context_chars": 100, "text": "xP\n\n\n\n\n uint32_t i;\n\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t t_id = omp_get_thread_num();\n // uint32_t A = 0;\n struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];\n\n\n if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))\n {\n // #pragma omp task untied\n // {\n\n breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);\n flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);\n // }\n\n }\n } #pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(i,v) schedule(dynamic,1024)", "context_chars": 100, "text": "aphGridResetActivePartitions(graph->grid);\n graphGridResetActivePartitionsMap(graph->grid);\n\n for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)\n {\n v = sharedFrontierQueue->queue[i];\n // graphGridSetActivePartitions(graph->grid, v);\n // if(getBit(graph->grid->activePartitionsMap,i))\n graphGridSetActivePartitionsMap(graph->grid, v);\n } #pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(i,v) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(i,stats,totalPartitions,FrontierBitmapCurr ,FrontierBitmapNext, graph)", "context_chars": 100, "text": "// PxP\n\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n\n if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j) && graph->grid->partitions[(i * totalPartitions) + j].num_edges)\n {\n breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), FrontierBitmapCurr, FrontierBitmapNext, stats);\n }\n } #pragma omp parallel for default(none) shared(i,stats,totalPartitions,FrontierBitmapCurr ,FrontierBitmapNext, graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(i) schedule(dynamic,1024)", "context_chars": 100, "text": "itmap *FrontierBitmap)\n{\n\n uint32_t i;\n\n graphGridResetActivePartitionsMap(graph->grid);\n\n for(i = 0 ; i < FrontierBitmap->size; i++)\n {\n if(getBit(FrontierBitmap, i))\n graphGridSetActivePartitionsMap(graph->grid, i);\n } #pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(i) schedule(dynamic,1024)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "stats->processed_nodes += processed_nodes;\n\n\n uint32_t degree;\n struct EdgeList *Nodes;\n\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n if(stats->parents[v] < 0) // optmization\n {\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n setBitAtomic(bitmapNext, v);\n stats->distances[v] = stats->distances[u] + 1;\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/BFS.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)", "context_chars": 100, "text": "ocessed_nodes += processed_nodes;\n\n\n uint32_t degree;\n struct AdjLinkedListNode *Nodes;\n\n\n for(v = 0 ; v < graph->num_vertices ; v++)\n {\n if(stats->parents[v] < 0) // optmization\n {\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n if(getBit(bitmapCurr, u))\n {\n stats->parents[v] = u;\n setBitAtomic(bitmapNext, v);\n stats->distances[v] = stats->distances[u] + 1;\n nf++;\n break;\n }\n }\n\n }\n\n } #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(uint32_t));\n stats->buckets_map = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": 
"#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_t));\n stats->buckets_map = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->buckets_map[v] = UINT_MAX / 2;\n stats->distances[v] = UINT_MAX / 2;\n stats->parents[v] = UINT_MAX;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)", "context_chars": 100, "text": "istance = UINT_MAX / 2;\n uint32_t maxDistance = 0;\n uint32_t numberOfDiscoverNodes = 0;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n\n if(stats->distances[v] != UINT_MAX / 2)\n {\n\n numberOfDiscoverNodes++;\n\n if(minDistance > stats->distances[v] && stats->distances[v] != 0)\n minDistance = stats->distances[v];\n\n if(maxDistance < stats->distances[v])\n maxDistance = stats->distances[v];\n\n\n }\n\n } #pragma omp parallel for reduction(max:maxDistance) reduction(+:numberOfDiscoverNodes) reduction(min:minDistance)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(e,weight) shared(graph,delta) reduction(+:edgesPlusCounter,edgesMinusCounter)", "context_chars": 100, "text": "MinusCounter = 0;\n uint32_t e;\n float weight;\n // uint32_t src;\n // uint32_t dest;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n // src = graph->sorted_edges_array[e].src;\n // dest = graph->sorted_edges_array[e].dest;\n\n\n weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n if(weight > delta)\n {\n edgesPlusCounter++;\n\n }\n else if (weight <= delta)\n {\n edgesMinusCounter++;\n\n }\n } #pragma omp parallel for private(e,weight) shared(graph,delta) reduction(+:edgesPlusCounter,edgesMinusCounter)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(e,weight) shared(edgesMinus_idx,edgesPlus_idx,edgesPlus,edgesMinus,graph)", "context_chars": 100, "text": "t(edgesMinusCounter);\n#endif\n\n uint32_t edgesPlus_idx = 0;\n uint32_t edgesMinus_idx = 0;\n\n for(e = 0 ; e < graph->num_edges ; e++)\n {\n\n weight = 1;\n#if WEIGHTED\n weight = 
graph->sorted_edges_array->edges_array_weight[e];\n\n uint32_t index = 0;\n\n if(weight > delta)\n {\n index = __sync_fetch_and_add(&edgesPlus_idx, 1);\n\n edgesPlus->edges_array_dest[index] = graph->sorted_edges_array->edges_array_dest[e];\n edgesPlus->edges_array_src[index] = graph->sorted_edges_array->edges_array_src[e];\n#if WEIGHTED\n edgesPlus->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n#if DIRECTED\n edgesPlusInverse->edges_array_dest[index] = graph->sorted_edges_array->edges_array_src[e];\n edgesPlusInverse->edges_array_src[index] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesPlusInverse->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n }\n else if (weight <= delta)\n {\n index = __sync_fetch_and_add(&edgesMinus_idx, 1);\n\n edgesMinus->edges_array_dest[index] = graph->sorted_edges_array->edges_array_dest[e];\n edgesMinus->edges_array_src[index] = graph->sorted_edges_array->edges_array_src[e];\n#if WEIGHTED\n edgesMinus->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n#if DIRECTED\n edgesMinusInverse->edges_array_dest[index] = graph->sorted_edges_array->edges_array_src[e];\n edgesMinusInverse->edges_array_src[index] = graph->sorted_edges_array->edges_array_dest[e];\n#if WEIGHTED\n edgesMinusInverse->edges_array_weight[index] = graph->sorted_edges_array->edges_array_weight[e];\n\n\n\n\n }\n } #pragma omp parallel for private(e,weight) shared(edgesMinus_idx,edgesPlus_idx,edgesPlus,edgesMinus,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(bitmapSetCurr, graph, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": "= 0;\n // uint32_t buckets_total_local =\n // process light edges\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n if(__sync_bool_compare_and_swap(&(stats->buckets_map[v]), stats->bucket_current, (UINT_MAX / 2)))\n {\n // if(stats->buckets_map[v] == stats->bucket_current) {\n\n // pop vertex from bucket list\n setBitAtomic(bitmapSetCurr, v);\n\n #pragma omp atomic update\n stats->buckets_total--;\n\n // stats->buckets_map[v] = UINT_MAX/2;\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graph->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n\n }\n }\n\n Stop(timer_inner);\n\n if(activeVertices)\n printf(\"| L%-14u | %-15u | %-15f |\\n\", iter, stats->buckets_total, Seconds(timer_inner));\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graph, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(bitmapSetCurr, graphLight, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": "r = 0;\n // uint32_t buckets_total_local =\n // process light edges\n 
for(v = 0; v < graphLight->num_vertices; v++)\n {\n\n if(__sync_bool_compare_and_swap(&(stats->buckets_map[v]), stats->bucket_current, (UINT_MAX / 2)))\n {\n // if(stats->buckets_map[v] == stats->bucket_current) {\n\n // pop vertex from bucket list\n setBitAtomic(bitmapSetCurr, v);\n\n #pragma omp atomic update\n stats->buckets_total--;\n\n // stats->buckets_map[v] = UINT_MAX/2;\n\n uint32_t degree = graphLight->vertices->out_degree[v];\n uint32_t edge_idx = graphLight->vertices->edges_idx[v];\n uint32_t j;\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graphLight->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphLight->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphLight->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n\n }\n }\n\n Stop(timer_inner);\n\n if(activeVertices)\n printf(\"| L%-14u | %-15u | %-15f |\\n\", iter, stats->buckets_total, Seconds(timer_inner));\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graphLight, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/SSSP.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(bitmapSetCurr, graphHeavy, stats) reduction(+ : activeVertices)", "context_chars": 100, "text": "Marker(2, iteration);\n SimMarker(3, iteration);\n#endif\n\n Start(timer_inner);\n\n for(v = 0; v < graphHeavy->num_vertices; v++)\n {\n if(getBit(bitmapSetCurr, v))\n {\n\n uint32_t degree = graphHeavy->vertices->out_degree[v];\n uint32_t edge_idx = graphHeavy->vertices->edges_idx[v];\n uint32_t j;\n\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t src = EXTRACT_VALUE(graphHeavy->sorted_edges_array->edges_array_src[j]);\n uint32_t dest = EXTRACT_VALUE(graphHeavy->sorted_edges_array->edges_array_dest[j]);\n float weight = 1;\n#if WEIGHTED\n weight = graphHeavy->sorted_edges_array->edges_array_weight[j];\n\n\n if(arguments->algo_numThreads == 1)\n activeVertices += SSSPRelax(src, dest, weight, stats);\n else\n activeVertices += SSSPAtomicRelax(src, dest, weight, stats);\n }\n }\n } #pragma omp parallel for private(v) shared(bitmapSetCurr, graphHeavy, stats) reduction(+ : activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for 
default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats)", "context_chars": 100, "text": "int32_t));;\n stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));;\n\n\n for(v = 0; v < stats->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->base_pr;\n stats->realRanks[v] = v;\n } #pragma omp parallel for default(none) private(v) shared(stats)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "amEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel for private(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]);\n // addAtomicFloat(float *num, float value)\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n 
uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]);\n // addAtomicFloat(float *num, float value)\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "geRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "amEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t i;\n // for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n #pragma omp parallel 
for private(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "lPartitions; ++i) // iterate over partitions rowwise\n {\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "geRanksNext[dest] += riDividedOnDiClause[src];\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n 
if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "treamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t j;\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t i;\n\n // #pragma omp parallel for private(i) // iterate over partitions columnwise\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(i) ", "context_chars": 100, "text": "(j)\n for (j = 0; j < totalPartitions; ++j)\n {\n uint32_t i;\n\n // for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n } #pragma omp parallel for private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->grid->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "reamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);\n\n uint32_t j;\n\n for (j = 0; j < totalPartitions; ++j) // iterate over partitions columnwise\n {\n uint32_t i;\n for (i = 0; i < totalPartitions; ++i)\n {\n uint32_t k;\n uint32_t src;\n uint32_t dest;\n struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];\n for (k = 0; k < partition->num_edges; ++k)\n {\n src = partition->edgeList->edges_array_src[k];\n dest = partition->edgeList->edges_array_dest[k];\n\n // #pragma omp atomic update\n pageRanksNext[dest] += riDividedOnDiClause[src];\n\n // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ageRanksNext[dest] , riDividedOnDiClause[src]);\n }\n }\n }\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n pageRanksNext[v] = nodeIncomingPR;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "eRanksNext[v]);\n#endif\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; 
v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n\n }\n } #pragma omp parallel for default(none) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "\n#endif\n\n }\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, 
pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "eRanksNext[v]);\n#endif\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / 
graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed32(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "eRanksNext[v]);\n#endif\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed32ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = 
FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed16(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "eRanksNext[v]);\n#endif\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = 
stats->base_pr + (stats->damp * Fixed16ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = FloatToFixed8(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n pageRanksNext[v] += riDividedOnDiClause[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "eRanksNext[v]);\n#endif\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, 
iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed8ToFloat(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n // pageRanksFP[v]=stats->base_prFP;\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n {\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);\n // riDividedOnDiClause[v] = DIVFixed64V1(pageRanksFP[v],UInt64ToFixed(graph->vertices[v].out_degree));\n }\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, 
(uint64_t) & (pageRanksNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n }\n } #pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": ";\n#endif\n }\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n // pageRanksFP[v] = FloatToFixed(nextPageRank);\n pageRanksNext[v] = 0;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. 
Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_32(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause_quant[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_16(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause_quant[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end 
iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)", "context_chars": 100, "text": ".........................................\\n\");\n\n //2. Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize_8(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n uint64_t nodeIncomingPR = 0;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause_quant[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause_quant[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n //nodeIncomingPR -= (degree * rDivD_params.zero);\n pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[v]), 'w', v, pageRanksNext[v]);\n\n } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "fdef SNIPER_HARNESS\n 
SimMarker(2, iter);\n#endif\n\n //uint64_t temp_degree = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n //temp_degree += vertices[v].in_degree;\n }\n } #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": "\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "ams.max,rDivD_params.scale,rDivD_params.zero);\n\n //2. 
Quantize riDividedOnDiClause[]\n for(v = 0; v < graph->num_vertices; v++)\n {\n riDividedOnDiClause_quant[v] = quantize(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "def SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n uint32_t degree = graph->vertices->out_degree[v];\n uint32_t edge_idx = graph->vertices->edges_idx[v];\n uint32_t j;\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic update\n pageRanksNext[u] += rDivD_params.scale * (riDividedOnDiClause_quant[v] - rDivD_params.zero);\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (pageRanksNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n }\n } #pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": ";\n#endif\n }\n }\n\n#ifdef SNIPER_HARNESS\n SimMarker(2, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0;\n double error = fabs(nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < 
graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices->out_degree[v])\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n uint32_t edge_idx;\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (riDividedOnDiClause[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->pageRanks[v]), 'w', v, stats->pageRanks[v]);\n\n degree = graph->vertices->out_degree[v];\n edge_idx = graph->vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n\n #pragma omp atomic write\n workListNext[u] = 1;\n\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (workListNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = 
stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(graph->vertices->out_degree[u])\n aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "ef SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n degree = graph->vertices->out_degree[v];\n float delta = stats->damp * (aResiduals[v] / degree);\n edge_idx = graph->vertices->edges_idx[v];\n\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n #pragma omp atomic update\n aResiduals[u] += delta;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (aResiduals[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (aResiduals[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (workListNext[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n if(!workListNext[u])\n {\n // #pragma omp atomic write\n workListNext[u] = 1;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (workListNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) 
reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n if(graph->vertices->out_degree[u])\n aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)", "context_chars": 100, "text": "def SNIPER_HARNESS\n int iter = stats->iterations;\n SimMarker(1, iter);\n#endif\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n degree = vertices->out_degree[v];\n edge_idx = vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = EXTRACT_VALUE(sorted_edges_array[j]);\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices->out_degree[u];\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (stats->pageRanks[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (graph->vertices->out_degree[u]), 'r', u, EXTRACT_MASK(sorted_edges_array[j]));\n\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n degree = graph->vertices->out_degree[v];\n float delta = stats->damp * (aResiduals[v] / degree);\n edge_idx = graph->vertices->edges_idx[v];\n for(j = edge_idx ; j < (edge_idx + degree) ; j++)\n {\n u = 
EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (aResiduals[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (aResiduals[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n aResiduals[u] += delta;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (workListNext[u]), 'r', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n#ifdef CACHE_HARNESS\n AccessDoubleTaggedCacheUInt32(stats->cache, (uint64_t) & (workListNext[u]), 'w', u, EXTRACT_MASK(graph->sorted_edges_array->edges_array_dest[j]));\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " }// end iteration loop\n\n#ifdef SNIPER_HARNESS\n SimRoiEnd();\n#endif\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = 
graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for 
private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n 
Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "_lock_t *vertex_lock = (omp_lock_t *) my_malloc( 
graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) 
reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n\n #pragma omp atomic write\n workListNext[u] = 
1;\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "---------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n\n // #pragma omp atomic write\n workListNext[u] = 1;\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) 
private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->edges_array_dest[j];\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->edges_array_dest[j];\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n 
aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = 
stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->dest;\n Nodes = Nodes->next;\n\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += 
riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)", "context_chars": 100, "text": ")\");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0;\n } #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s++)\n {\n error_total = 0;\n activeVertices = 0;\n Start(timer_inner);\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for 
reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)", "context_chars": 100, "text": "[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": "ces[v].out_degree;\n }\n\n pageRanksNext[v] = nodeIncomingPR;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)", "context_chars": 100, "text": "lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));\n\n\n\n\n for (i = 0; i < graph->num_vertices; i++)\n {\n omp_init_lock(&(vertex_lock[i]));\n } #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)", "context_chars": 100, "text": ");\n printf(\" -----------------------------------------------------\\n\");\n\n Start(timer);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n pageRanksNext[v] = 0.0f;\n } #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", 
"omp_pragma_line": "#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)", "context_chars": 100, "text": "s++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n } #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)", "context_chars": 100, "text": "v].out_degree);\n else\n riDividedOnDiClause[v] = 0.0f;\n\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n Nodes = graph->vertices[v].outNodes;\n uint32_t degree = graph->vertices[v].out_degree;\n // uint32_t tid = omp_get_thread_num();\n uint32_t j;\n\n for(j = 0 ; j < (degree) ; j++)\n {\n uint32_t u = Nodes->dest;\n Nodes = Nodes->next;\n // omp_set_lock(&(vertex_lock[u]));\n // pageRanksNext[u] += riDividedOnDiClause[v];\n // omp_unset_lock((&vertex_lock[u]));\n\n #pragma omp atomic update\n pageRanksNext[u] += riDividedOnDiClause[v];\n\n // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);\n // printf(\"tid %u degree %u edge_idx %u v %u u %u \\n\",tid,degree,edge_idx,v,u );\n\n // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)", "context_chars": 100, "text": " // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);\n }\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n float prevPageRank = stats->pageRanks[v];\n float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));\n stats->pageRanks[v] = nextPageRank;\n pageRanksNext[v] = 0.0f;\n double error = fabs( nextPageRank - prevPageRank);\n error_total += (error / graph->num_vertices);\n\n if(error >= arguments->epsilon)\n {\n activeVertices++;\n }\n } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------\\n\");\n // pageRankPrint(pageRanks, graph->num_vertices);\n\n for (i = 0; i 
< graph->num_vertices; i++)\n {\n omp_destroy_lock(&(vertex_lock[i]));\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:activeVertices)", "context_chars": 100, "text": "----------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n for(i = 0; i < graph->num_vertices; i++)\n {\n workListNext[i] = 1;\n activeVertices++;\n } #pragma omp parallel for reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(graph->vertices[v].out_degree)\n riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)", "context_chars": 100, "text": "s[v].out_degree;\n else\n riDividedOnDiClause[v] = 0.0f;\n }\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n uint32_t degree;\n uint32_t j;\n uint32_t u;\n double error = 0;\n float nodeIncomingPR = 0;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))\n }\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n error = fabs(newPageRank - oldPageRank);\n error_total += error / graph->num_vertices;\n if(error >= arguments->epsilon)\n {\n stats->pageRanks[v] = newPageRank;\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n #pragma omp atomic write\n workListNext[u] = 1;\n // uint8_t old_val = workListNext[u];\n // if(!old_val){\n // __sync_bool_compare_and_swap(&workListNext[u], 0, 1);\n // }\n }\n activeVertices++;\n }\n }\n } #pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": " if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n float oldPageRank = stats->pageRanks[v];\n float newPageRank = aResiduals[v] + stats->pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n // #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n\n // #pragma omp atomic write\n workListNext[u] = 1;\n\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for 
private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices)", "context_chars": 100, "text": "--------------------------------------------\\n\");\n\n Start(timer);\n\n Start(timer_inner);\n\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n\n aResiduals[v] = 0.0f;\n workListCurr[v] = 1;\n workListNext[v] = 0;\n activeVertices++;\n\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n if(graph->vertices[u].out_degree)\n aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))\n }\n aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];\n } #pragma omp parallel for private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)", "context_chars": 100, "text": "++)\n {\n Start(timer_inner);\n error_total = 0;\n activeVertices = 0;\n\n for(v = 0; v < graph->num_vertices; v++)\n {\n if(workListCurr[v])\n {\n\n float nodeIncomingPR = 0.0f;\n\n#if DIRECTED // will look at the other neighbours if directed by using inverese edge list\n Nodes = graph->vertices[v].inNodes;\n degree = graph->vertices[v].in_degree;\n#else\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;\n }\n\n float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);\n float oldPageRank = stats->pageRanks[v];\n // float newPageRank = aResiduals[v]+pageRanks[v];\n error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);\n\n #pragma omp atomic write\n stats->pageRanks[v] = newPageRank;\n\n Nodes = graph->vertices[v].outNodes;\n degree = graph->vertices[v].out_degree;\n\n float delta = stats->damp * (aResiduals[v] / degree);\n\n\n\n for(j = 0 ; j < (degree) ; j++)\n {\n u = Nodes->dest;\n Nodes = Nodes->next;\n float prevResidual = 0.0f;\n\n prevResidual = aResiduals[u];\n\n #pragma omp atomic update\n aResiduals[u] += delta;\n\n if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))\n {\n activeVertices++;\n if(!workListNext[u])\n {\n workListNext[u] = 1;\n }\n }\n }\n aResiduals[v] = 0.0f;\n }\n } #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/atmughrabi/OpenGraphSim/00_graph_bench/src/algorithms/openmp/pageRank.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "if(activeVertices == 0)\n break;\n\n }// end iteration loop\n\n\n double sum = 0.0f;\n for(v = 0; v < graph->num_vertices; v++)\n {\n stats->pageRanks[v] = 
stats->pageRanks[v] / graph->num_vertices;\n sum += stats->pageRanks[v];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-4/p1/main.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic)", "context_chars": 100, "text": "SIZE], float B[MATRIX_SIZE][MATRIX_SIZE])\n{\n double start, finish;\n\n start = omp_get_wtime();\nfor(int r=0; r #pragma omp parallel for schedule(dynamic)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-3/p2/example2_omp.cpp", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) num_threads(thread_cnt)", "context_chars": 100, "text": " }\n\n int *rand_num = new int[N];\n for(int i=0; ifor(int i=0; i #pragma omp parallel for schedule(runtime) num_threads(thread_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-3/p2/example_omp.cpp", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) num_threads(thread_cnt)", "context_chars": 100, "text": "/2.0;\n const int maxiter = 100000;\n\n double start, finish;\n\n start = omp_get_wtime();\n\n for(int pix=0; pix #pragma omp parallel for schedule(runtime) num_threads(thread_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-3/p2/example3_omp.cpp", "omp_pragma_line": "#pragma omp parallel for schedule(runtime) num_threads(thread_cnt)", "context_chars": 100, "text": "t[cnt[i]];\n for (int j = 0; j < cnt[i]; ++j)\n rand_num[i][j] = rand();\n }\n\n for(int i=0; i #pragma omp parallel for schedule(runtime) num_threads(thread_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-3/p1/vec_parallel_optimized.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:palin_cnt) schedule(dynamic) num_threads(thread_cnt)", "context_chars": 100, "text": "of());\n }\n\n vector > * results = new vector >[thread_cnt];\n\n for (int i=0; i::iterator iter = find(words.begin(), words.begin() + i + 1, reverse_word);\n if (iter != words.begin() + i + 1) {\n results[tid].push_back(make_pair(iter - words.begin(), i));\n ++palin_cnt;\n }\n } #pragma omp parallel for reduction(+:palin_cnt) schedule(dynamic) num_threads(thread_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/taeguk/dist-prog-assignment/assignment-3/p1/vec_parallel.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:palin_cnt) schedule(dynamic) num_threads(thread_cnt)", "context_chars": 100, "text": " words.push_back(word);\n ifs >> word;\n } while (!ifs.eof());\n }\n\n for (int i=0; i::iterator iter = find(words.begin(), words.begin() + i + 1, reverse_word);\n if (iter != words.begin() + i + 1) {\n #pragma omp critical(output)\n ofs << reverse_word << \" \" << words[i] << endl;\n ++palin_cnt;\n }\n } #pragma omp parallel for reduction(+:palin_cnt) schedule(dynamic) num_threads(thread_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/DLopezMadrid/tracerr/src/Render.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ":move(shape));\n }\n }\n float dir_x, dir_y, dir_z;\n dir_z = -height_ / (2.f * tan(fov_ / 2.f));\nfor (int row = 0; row < height_; row++) {\n dir_y = -(row + 0.5f) + height_ / 2.f;// this flips the image at the same time\n for (int col = 0; col < width_; col++) {\n dir_x 
= (col + 0.5f) - width_ / 2.f;\n xyz dir{dir_x, dir_y, dir_z};\n dir.normalize();\n rgb_f pix = cast_ray(image_origin_, dir, 0);\n rgb rgb_val = Material::vec2rgb(pix);\n image_.SetPixelColor({col, row}, rgb_val);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Source Codes/C language/fox_floats_timer_caching_omp_fileIO.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Source Codes/C language/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Source Codes/C language/fox_floats_timer_caching_omp.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + 
Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/256*256/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/128*128/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/512*512/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory 
access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/8192*8192/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/4096*4096/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/64*64/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums 
in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/16384*16384/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/1024*1024/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProblemScale/2048*2048/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) 
\n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon E5 CPUs/ProcessScale/4*4/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/256*256/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/128*128/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process 
id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/512*512/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/8192*8192/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/4096*4096/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the 
MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/64*64/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/1024*1024/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProblemScale/2048*2048/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) 
num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/PKU-HPC/Testing on Intel Xeon Phi KNL MIC/ProcessScale/4*4*8/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yangyang14641/Parallel-Matrix-Multiplication-FOX-Algorithm/Code Tests/Dell XPS8900/Testing on Intel Core i7 CPU/Trace Analyzer Test/fox_floats_timer_caching_omp_fileIO_benchmark.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) ", "context_chars": 100, "text": "m_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator\n \n for (i = 0; i < Order(local_A); i++) {\n // printf(\"Current in the Fox Kernel:\\n my process id is %d, my thread id is %d\\n\",my_rank,omp_get_thread_num()); \n for (j = 0; j < Order(local_A); j++) \n for (k = 0; k < Order(local_B); k++)\n Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage\n + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) \n /* Entry(local_C,i,j) = Entry(local_C,i,j) \n + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper\n */\n } #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/level.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "st* 
zone element of first component to a 256-Byte boundary\n uint64_t ofs;\n #ifdef _OPENMP\n for(ofs=0;ofs<(uint64_t)numVectors*level->num_my_boxes*level->box_volume;ofs++){tmpbuf[ofs]=0.0;} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/level.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "box_volume*sizeof(double), level->um_access_policy);\n uint64_t ofs;\n #ifdef _OPENMP\n for(ofs=0;ofs<(uint64_t)level->num_my_boxes*level->box_volume;ofs++){level->vectors[c][ofs]=0.0;} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/solvers/matmul.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1) collapse(2)", "context_chars": 100, "text": "ric [G,g], do the all_reduce on the upper triangle and then duplicate (saves BW)\n #ifdef _OPENMP\n for(mm=0;mm=mm){ // upper triangular\n int box;\n double a_dot_b_level = 0.0;\n for(box=0;boxnum_my_boxes;box++){\n int i,j,k;\n const int jStride = level->my_boxes[box].jStride;\n const int kStride = level->my_boxes[box].kStride;\n const int ghosts = level->my_boxes[box].ghosts;\n const int dim = level->my_boxes[box].dim;\n double * __restrict__ grid_a = level->my_boxes[box].vectors[id_A[mm]] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point\n double * __restrict__ grid_b = level->my_boxes[box].vectors[id_B[nn]] + ghosts*(1+jStride+kStride); \n double a_dot_b_box = 0.0;\n for(k=0;k #pragma omp parallel for schedule(static,1) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/problem.fv.c", "omp_pragma_line": "#pragma omp parallel for private(k,j,i) collapse(3)", "context_chars": 100, "text": " level->my_boxes[box].dim;\n const int dim_k = level->my_boxes[box].dim;\n #ifdef _OPENMP\n for(k=0;k<=dim_k;k++){ // include high face\n for(j=0;j<=dim_j;j++){ // include high face\n for(i=0;i<=dim_i;i++){ // include high face\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;\n double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell\n double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );\n double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );\n double A,Bi,Bj,Bk;\n //double A,B,Bx,By,Bz,Bi,Bj,Bk;\n //double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n A = 1.0;\n Bi = 1.0;\n Bj = 1.0;\n Bk = 1.0;\n #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...\n Bi=evaluateBeta(x-hLevel*0.5,y ,z ,hLevel,0,1,1); // face-centered value of Beta for beta_i\n Bj=evaluateBeta(x ,y-hLevel*0.5,z ,hLevel,1,0,1); // face-centered value of Beta for beta_j\n Bk=evaluateBeta(x ,y ,z-hLevel*0.5,hLevel,1,1,0); // face-centered value of Beta for beta_k\n \n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n double F=evaluateF(x,y,z,hLevel,1,1,1);\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - \n level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;\n level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;\n level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;\n level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;\n level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = 0.0;\n level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n }}} #pragma omp parallel for private(k,j,i) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/problem.p6.c", "omp_pragma_line": "#pragma omp parallel for private(k,j,i) collapse(3)", "context_chars": 100, "text": " level->my_boxes[box].dim;\n const int dim_k = level->my_boxes[box].dim;\n #ifdef _OPENMP\n for(k=0;k<=dim_k;k++){ // include high face\n for(j=0;j<=dim_j;j++){ // include high face\n for(i=0;i<=dim_i;i++){ // include high face\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;\n double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell\n double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );\n double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );\n double A,B,Bx,By,Bz,Bi,Bj,Bk;\n double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n A = 1.0;\n B = 1.0;\n Bx = 0.0;\n By = 0.0;\n Bz = 0.0; \n Bi = 1.0;\n Bj = 1.0;\n Bk = 1.0;\n #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...\n evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i\n evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j\n evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k\n evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta\n \n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );\n double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;\n level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;\n level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;\n level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;\n level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U;\n level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n }}} #pragma omp parallel for private(k,j,i) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p0.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_f->interpolation.num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_recvs;n++){\n 
MPI_Irecv(level_f->interpolation.recv_buffers[n],\n level_f->interpolation.recv_sizes[n],\n MPI_DOUBLE,\n level_f->interpolation.recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p0.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_c->interpolation.num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_sends;n++){\n MPI_Isend(level_c->interpolation.send_buffers[n],\n level_c->interpolation.send_sizes[n],\n MPI_DOUBLE,\n level_c->interpolation.send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/problem.sine.c", "omp_pragma_line": "#pragma omp parallel for private(k,j,i) collapse(3)", "context_chars": 100, "text": " level->my_boxes[box].dim;\n const int dim_k = level->my_boxes[box].dim;\n #ifdef _OPENMP\n for(k=0;k<=dim_k;k++){ // include high face\n for(j=0;j<=dim_j;j++){ // include high face\n for(i=0;i<=dim_i;i++){ // include high face\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;\n double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell\n double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );\n double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );\n double A,B,Bx,By,Bz,Bi,Bj,Bk;\n double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n A = 1.0;\n B = 1.0;\n Bx = 0.0;\n By = 0.0;\n Bz = 0.0; \n Bi = 1.0;\n Bj = 1.0;\n Bk = 1.0;\n #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...\n evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i\n evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j\n evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k\n evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta\n \n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );\n double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;\n level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;\n level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;\n level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;\n level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U;\n level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n }}} #pragma omp parallel for private(k,j,i) collapse(3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/symgs.c", "omp_pragma_line": "#pragma omp parallel for private(box)", "context_chars": 100, "text": "y_BCs(level,phi_id,stencil_get_shape());\n\n double _timeStart = getTime();\n #ifdef _OPENMP\n for(box=0;boxnum_my_boxes;box++){\n int i,j,k;\n const int ghosts = level->box_ghosts;\n const int jStride = level->my_boxes[box].jStride;\n const int kStride = level->my_boxes[box].kStride;\n const int dim = level->my_boxes[box].dim;\n const double h2inv = 1.0/(level->h*level->h);\n double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point\n const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);\n const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);\n const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride);\n const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain\n \n\n if( (s&0x1)==0 ){ // forward sweep... hard to thread\n for(k=0;k=0;k--){\n for(j=dim-1;j>=0;j--){\n for(i=dim-1;i>=0;i--){\n int ijk = i + j*jStride + k*kStride;\n double Ax = apply_op_ijk(phi);\n phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);\n }}}\n }\n\n } #pragma omp parallel for private(box)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p2.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_f->interpolation.num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_recvs;n++){\n MPI_Irecv(level_f->interpolation.recv_buffers[n],\n level_f->interpolation.recv_sizes[n],\n MPI_DOUBLE,\n level_f->interpolation.recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p2.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_c->interpolation.num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_sends;n++){\n MPI_Isend(level_c->interpolation.send_buffers[n],\n level_c->interpolation.send_sizes[n],\n MPI_DOUBLE,\n level_c->interpolation.send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/exchange_boundary.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "ange_ghosts[shape].num_recvs>0){\n _timeStart = getTime();\n #ifdef 
USE_MPI_THREAD_MULTIPLE\n for(n=0;nexchange_ghosts[shape].num_recvs;n++){\n MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],\n level->exchange_ghosts[shape].recv_sizes[n],\n MPI_DOUBLE,\n level->exchange_ghosts[shape].recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/exchange_boundary.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "ange_ghosts[shape].num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;nexchange_ghosts[shape].num_sends;n++){\n MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],\n level->exchange_ghosts[shape].send_sizes[n],\n MPI_DOUBLE,\n level->exchange_ghosts[shape].send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n ); \n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_v4.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_f->interpolation.num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_recvs;n++){\n MPI_Irecv(level_f->interpolation.recv_buffers[n],\n level_f->interpolation.recv_sizes[n],\n MPI_DOUBLE,\n level_f->interpolation.recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_v4.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_c->interpolation.num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_sends;n++){\n MPI_Isend(level_c->interpolation.send_buffers[n],\n level_c->interpolation.send_sizes[n],\n MPI_DOUBLE,\n level_c->interpolation.send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/problem.p4.c", "omp_pragma_line": "#pragma omp parallel for private(k,j,i) collapse(3)", "context_chars": 100, "text": " level->my_boxes[box].dim;\n const int dim_k = level->my_boxes[box].dim;\n #ifdef _OPENMP\n for(k=0;k<=dim_k;k++){ // include high face\n for(j=0;j<=dim_j;j++){ // include high face\n for(i=0;i<=dim_i;i++){ // include high face\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;\n double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell\n double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );\n double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );\n double A,B,Bx,By,Bz,Bi,Bj,Bk;\n double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n A = 1.0;\n B = 1.0;\n Bx = 0.0;\n By = 0.0;\n Bz = 0.0; 
\n Bi = 1.0;\n Bj = 1.0;\n Bk = 1.0;\n #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...\n evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i\n evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j\n evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k\n evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta\n \n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );\n double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;\n level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;\n level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;\n level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;\n level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U;\n level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;\n //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n }}} #pragma omp parallel for private(k,j,i) collapse(3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/restriction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "n[restrictionType].num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;nrestriction[restrictionType].num_recvs;n++){\n MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],\n level_c->restriction[restrictionType].recv_sizes[n],\n MPI_DOUBLE,\n level_c->restriction[restrictionType].recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/restriction.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "n[restrictionType].num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;nrestriction[restrictionType].num_sends;n++){\n MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],\n level_f->restriction[restrictionType].send_sizes[n],\n MPI_DOUBLE,\n level_f->restriction[restrictionType].send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_v2.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_f->interpolation.num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_recvs;n++){\n MPI_Irecv(level_f->interpolation.recv_buffers[n],\n level_f->interpolation.recv_sizes[n],\n MPI_DOUBLE,\n level_f->interpolation.recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_v2.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_c->interpolation.num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_sends;n++){\n MPI_Isend(level_c->interpolation.send_buffers[n],\n level_c->interpolation.send_sizes[n],\n MPI_DOUBLE,\n level_c->interpolation.send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p1.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_f->interpolation.num_recvs>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_recvs;n++){\n MPI_Irecv(level_f->interpolation.recv_buffers[n],\n level_f->interpolation.recv_sizes[n],\n MPI_DOUBLE,\n level_f->interpolation.recv_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &recv_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators/interpolation_p1.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,1)", "context_chars": 100, "text": "l_c->interpolation.num_sends>0){\n _timeStart = getTime();\n #ifdef USE_MPI_THREAD_MULTIPLE\n for(n=0;ninterpolation.num_sends;n++){\n MPI_Isend(level_c->interpolation.send_buffers[n],\n level_c->interpolation.send_sizes[n],\n MPI_DOUBLE,\n level_c->interpolation.send_ranks[n],\n my_tag,\n MPI_COMM_WORLD,\n &send_requests[n]\n );\n } #pragma omp parallel for schedule(dynamic,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/hpgmg-parallel/finite-volume/source/operators.old/symgs.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " dim = level->box_dim;\n const double h2inv = 1.0/(level->h*level->h);\n\n #ifdef _OPENMP\n for(box=0;boxnum_my_boxes;box++){\n int i,j,k;\n double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point\n const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);\n const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);\n const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);\n const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride);\n const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain\n \n\n if( (s&0x1)==0 ){ // forward sweep... 
hard to thread\n for(k=0;k=0;k--){\n for(j=dim-1;j>=0;j--){\n for(i=dim-1;i>=0;i--){\n int ijk = i + j*jStride + k*kStride;\n double Ax = apply_op_ijk(phi);\n phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);\n }}}\n }\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/kernel_initialise.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n if(*a == NULL) \n {\n die(__LINE__, __FILE__, \"Error allocating buffer %s\\n\");\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n (*a)[index] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nner = y - 2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(from: buffer[:depth*y_inner])\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < halo_depth+depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-halo_depth)*depth;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nner = y - 2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(from: buffer[:depth*y_inner])\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth-depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-(x-halo_depth-depth)) + (jj-halo_depth)*depth;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_inner = x-2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(from: buffer[:depth*x_inner])\nfor(int jj = y-halo_depth-depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(y-halo_depth-depth))*x_inner;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_inner = x-2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(from: buffer[:depth*x_inner])\nfor(int jj = halo_depth; jj < halo_depth+depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-halo_depth)*x_inner;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_inner = y - 2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(to: buffer[:depth*y_inner])\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n 
for(int kk = halo_depth-depth; kk < halo_depth; ++kk)\n {\n int buf_index = (kk-(halo_depth-depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_inner = y - 2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(to: buffer[:depth*y_inner])\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth; kk < x-halo_depth+depth; ++kk)\n {\n int buf_index = (kk-(x-halo_depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " x_inner = x-2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(to: buffer[:depth*x_inner])\nfor(int jj = y-halo_depth; jj < y-halo_depth+depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(y-halo_depth))*x_inner;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " x_inner = x-2*halo_depth;\n\n#pragma omp target if(is_offload) \\\n map(to: buffer[:depth*x_inner])\nfor(int jj = halo_depth-depth; jj < halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(halo_depth-depth))*x_inner;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\n#pragma omp target\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n double temp = energy[index]*density[index];\n u0[index] = temp;\n u[index] = temp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " u0[index] = temp;\n u[index] = temp;\n }\n }\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n double densityCentre = (coefficient == CONDUCTIVITY) \n ? density[index] : 1.0/density[index];\n double densityLeft = (coefficient == CONDUCTIVITY) \n ? density[index-1] : 1.0/density[index-1];\n double densityDown = (coefficient == CONDUCTIVITY) \n ? 
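/*
 * Editor's note (illustrative sketch, not part of the dataset): the tealeaf
 * pack_halos entries above gather a halo strip of width `depth` from a row-major
 * x*y field into a contiguous send buffer, with `#pragma omp parallel for` over the
 * rows. This is a minimal host-only sketch of the left-edge pack under those
 * assumptions; the `omp target`/offload clauses from the omp4_knc variant are
 * deliberately omitted.
 */
#include <stdio.h>
#include <stdlib.h>

/* Pack the `depth` columns just inside the left halo of a row-major x*y field. */
static void pack_left(int x, int y, int depth, int halo_depth,
                      const double *field, double *buffer)
{
#pragma omp parallel for
    for (int jj = halo_depth; jj < y - halo_depth; ++jj) {
        for (int kk = halo_depth; kk < halo_depth + depth; ++kk) {
            const int buf_index = (kk - halo_depth) + (jj - halo_depth) * depth;
            buffer[buf_index] = field[jj * x + kk];
        }
    }
}

int main(void)
{
    const int x = 6, y = 6, depth = 1, halo_depth = 2;
    double *field  = malloc(sizeof(double) * x * y);
    double *buffer = malloc(sizeof(double) * depth * (y - 2 * halo_depth));
    for (int i = 0; i < x * y; ++i) field[i] = (double)i;
    pack_left(x, y, depth, halo_depth, field, buffer);
    printf("first packed value: %g\n", buffer[0]);   /* field[2*6 + 2] = 14 */
    free(field); free(buffer);
    return 0;
}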
density[index-x] : 1.0/density[index-x];\n\n kx[index] = rx*(densityLeft+densityCentre)/(2.0*densityLeft*densityCentre);\n ky[index] = ry*(densityDown+densityCentre)/(2.0*densityDown*densityCentre);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n double* ky,\n double* u0,\n double* u,\n double* r)\n{\n#pragma omp target\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n r[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/jacobi.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: err)", "context_chars": 100, "text": " r[index] = u[index];\t\n }\n }\n\n double err=0.0;\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n u[index] = (u0[index] \n + (kx[index+1]*r[index+1] + kx[index]*r[index-1])\n + (ky[index+x]*r[index+x] + ky[index]*r[index-x]))\n / (1.0 + (kx[index]+kx[index+1])\n + (ky[index]+ky[index+x]));\n\n err += fabs(u[index]-r[index]);\n }\n } #pragma omp parallel for reduction(+: err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "st int y,\n const int halo_depth,\n double* u0,\n double* u)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n u0[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* u0,\n double* r,\n double* kx,\n double* ky)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n r[index] = u0[index] - smvp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp)", "context_chars": 100, "text": "th,\n double* buffer,\n double* norm)\n{\n double norm_temp = 0.0;\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n norm_temp += buffer[index]*buffer[index];\t\t\t\n }\n } #pragma omp parallel for reduction(+:norm_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "alo_depth,\n double* energy,\n double* density,\n double* u)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n 
{\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n energy[index] = u[index]/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nst int y,\n const int halo_depth,\n double* u,\n double* p)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\t\n const int index = kk + jj*x;\n u[index] += p[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n double* r,\n double* w,\n double* kx,\n double* ky)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u0[index]-w[index];\n p[index] = r[index] / theta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n double* r,\n double* w,\n double* kx,\n double* ky)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\t\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u0[index]-w[index];\n p[index] = alpha*p[index] + beta*r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\n#pragma omp target\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n p[index] = 0.0;\n r[index] = 0.0;\n u[index] = energy[index]*density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ex] = 0.0;\n u[index] = energy[index]*density[index];\n }\n }\n\n#pragma omp target\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n w[index] = (coefficient == CONDUCTIVITY) \n ? density[index] : 1.0/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "Y) \n ? 
density[index] : 1.0/density[index];\n }\n }\n\n \n#pragma omp target \nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n kx[index] = rx*(w[index-1]+w[index]) /\n (2.0*w[index-1]*w[index]);\n ky[index] = ry*(w[index-x]+w[index]) /\n (2.0*w[index-x]*w[index]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rro_temp)", "context_chars": 100, "text": " (2.0*w[index-x]*w[index]);\n }\n }\n\n double rro_temp = 0.0;\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u[index]-w[index];\n p[index] = r[index];\n rro_temp += r[index]*p[index];\n }\n } #pragma omp parallel for reduction(+:rro_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pw_temp)", "context_chars": 100, "text": " double* w,\n double* kx,\n double* ky)\n{\n double pw_temp = 0.0;\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(p);\n w[index] = smvp;\n pw_temp += w[index]*p[index];\n }\n } #pragma omp parallel for reduction(+:pw_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rrn_temp)", "context_chars": 100, "text": " double* p,\n double* r,\n double* w)\n{\n double rrn_temp = 0.0;\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n\n u[index] += alpha*p[index];\n r[index] -= alpha*w[index];\n rrn_temp += r[index]*r[index];\n }\n } #pragma omp parallel for reduction(+:rrn_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t halo_depth,\n const double beta,\n double* p,\n double* r)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n\n p[index] = beta*p[index] + r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t int halo_depth,\n double theta,\n double* r,\n double* sd)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n sd[index] = r[index] / theta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/ppcg.c", "omp_pragma_line": 
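/*
 * Editor's note (illustrative sketch, not part of the dataset): the tealeaf
 * jacobi/cg entries above and below accumulate scalars such as `err`, `rro_temp`,
 * `pw_temp`, and `rrn_temp` with `#pragma omp parallel for reduction(+ : ...)` over
 * the non-halo interior of a row-major x*y grid. A minimal sketch of that reduction
 * shape follows; the project's SMVP macro and target/offload clauses are
 * intentionally left out, so this only shows the dot-product style accumulation.
 */
#include <stdio.h>
#include <stdlib.h>

/* Dot product of r and p over the interior (halo_depth cells trimmed per side). */
static double interior_dot(int x, int y, int halo_depth,
                           const double *r, const double *p)
{
    double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
    for (int jj = halo_depth; jj < y - halo_depth; ++jj) {
        for (int kk = halo_depth; kk < x - halo_depth; ++kk) {
            const int index = kk + jj * x;
            sum += r[index] * p[index];
        }
    }
    return sum;
}

int main(void)
{
    const int x = 8, y = 8, halo_depth = 2;
    double *r = malloc(sizeof(double) * x * y);
    double *p = malloc(sizeof(double) * x * y);
    for (int i = 0; i < x * y; ++i) { r[i] = 1.0; p[i] = 0.5; }
    printf("rro = %g\n", interior_dot(x, y, halo_depth, r, p)); /* 16 cells * 0.5 = 8 */
    free(r); free(p);
    return 0;
}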
"#pragma omp parallel for", "context_chars": 100, "text": " double* r,\n double* kx,\n double* ky,\n double* sd)\n{\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(sd);\n r[index] -= smvp;\n u[index] += sd[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " r[index] -= smvp;\n u[index] += sd[index];\n }\n }\n\n#pragma omp target\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n sd[index] = alpha*sd[index] + beta*r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/local_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "st int depth, \n double* buffer,\n bool is_offload)\n{\n#pragma omp target if(is_offload)\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(halo_depth-kk-1)] = buffer[base+(halo_depth+kk)];\t\t\t\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/local_halos.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nst int depth,\n double* buffer,\n bool is_offload)\n{\n#pragma omp target if(is_offload)\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(x-halo_depth+kk)] \n = buffer[base+(x-halo_depth-1-kk)];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " bool is_offload)\n{\n for(int jj = 0; jj < depth; ++jj)\n {\n#pragma omp target if(is_offload)\nfor(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(y-halo_depth+jj)*x] \n = buffer[base+(y-halo_depth-1-jj)*x];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_knc/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " bool is_offload)\n{\n for(int jj = 0; jj < depth; ++jj)\n {\n#pragma omp target if(is_offload)\nfor(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(halo_depth-jj-1)*x] \n = buffer[base+(halo_depth+jj)*x];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp4_clang/kernel_initialise.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble)*x*y);\n\n if(*a == NULL) \n {\n die(__LINE__, __FILE__, \"Error allocating buffer %s\\n\");\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n (*a)[index] = 0.0;\n }\n } 
#pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/openacc/kernel_initialise.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n if(*a == NULL) \n {\n die(__LINE__, __FILE__, \"Error allocating buffer %s\\n\");\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n (*a)[index] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/kernel_initialise.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n if(*a == NULL) \n {\n die(__LINE__, __FILE__, \"Error allocating buffer %s\\n\");\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int ind = jj*x+kk;\n (*a)[ind] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/store_energy.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "void store_energy(\n int x,\n int y,\n double* energy0,\n double* energy)\n{\nfor(int ii = 0; ii < x*y; ++ii)\n {\n energy[ii] = energy0[ii];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < halo_depth+depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-halo_depth)*depth;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth-depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-(x-halo_depth-depth)) + (jj-halo_depth)*depth;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = y-halo_depth-depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(y-halo_depth-depth))*x_inner;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = halo_depth; jj < halo_depth+depth; ++jj)\n {\n for(int kk 
= halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-halo_depth)*x_inner;\n buffer[buf_index] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth-depth; kk < halo_depth; ++kk)\n {\n int buf_index = (kk-(halo_depth-depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth; kk < x-halo_depth+depth; ++kk)\n {\n int buf_index = (kk-(x-halo_depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = y-halo_depth; jj < y-halo_depth+depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(y-halo_depth))*x_inner;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = halo_depth-depth; jj < halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int buf_index = (kk-halo_depth) + (jj-(halo_depth-depth))*x_inner;\n field[jj*x+kk] = buffer[buf_index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "IVITY)\n {\n die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = jj*x+kk;\n double temp = energy[index]*density[index];\n u0[index] = temp;\n u[index] = temp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[index]*density[index];\n u0[index] = temp;\n u[index] = temp;\n }\n }\n\nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = jj*x+kk;\n double densityCentre = (coefficient == CONDUCTIVITY) \n ? 
density[index] : 1.0/density[index];\n double densityLeft = (coefficient == CONDUCTIVITY) \n ? density[index-1] : 1.0/density[index-1];\n double densityDown = (coefficient == CONDUCTIVITY) \n ? density[index-x] : 1.0/density[index-x];\n\n kx[index] = rx*(densityLeft+densityCentre)/(2.0*densityLeft*densityCentre);\n ky[index] = ry*(densityDown+densityCentre)/(2.0*densityDown*densityCentre);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* kx,\n double* ky,\n double* u0,\n double* u,\n double* r)\n{\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = jj*x+kk;\n r[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/jacobi.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: err)", "context_chars": 100, "text": " const int index = jj*x+kk;\n r[index] = u[index];\t\n }\n }\n\n double err=0.0;\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = jj*x+kk;\n u[index] = (u0[index] \n + (kx[index+1]*r[index+1] + kx[index]*r[index-1])\n + (ky[index+x]*r[index+x] + ky[index]*r[index-x]))\n / (1.0 + (kx[index]+kx[index+1])\n + (ky[index]+ky[index+x]));\n\n err += abs(u[index]-r[index]);\n }\n } #pragma omp parallel for reduction(+: err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int x,\n const int y,\n const int halo_depth,\n double* u0,\n double* u)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int index = jj*x+kk;\n u0[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* u,\n double* u0,\n double* r,\n double* kx,\n double* ky)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int index = jj*x+kk;\n const double smvp = SMVP(u);\n r[index] = u0[index] - smvp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp)", "context_chars": 100, "text": " const int halo_depth,\n double* buffer,\n double* norm)\n{\n double norm_temp = 0.0;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int index = jj*x+kk;\n norm_temp += buffer[index]*buffer[index];\t\t\t\n }\n } #pragma omp parallel for reduction(+:norm_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int 
halo_depth,\n double* energy,\n double* density,\n double* u)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int index = jj*x+kk;\n energy[index] = u[index]/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int halo_depth,\n double* u,\n double* p)\n{\n int x_inner = x - 2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_daxpy(x_inner, 1.0, p + offset, 1, u + offset, 1);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \"n\", &m, a_non_zeros, a_row_index, a_col_index, u, w);\n\n int x_inner = x - 2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_dcopy(x_inner, u0 + offset, 1, r + offset, 1);\n cblas_daxpy(x_inner, -1.0, w + offset, 1, r + offset, 1);\n cblas_dscal(x_inner, 1.0/theta, r + offset, 1);\n cblas_dcopy(x_inner, r + offset, 1, p + offset, 1);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \"n\", &m, a_non_zeros, a_row_index, a_col_index, u, w);\n\n int x_inner = x - 2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_dcopy(x_inner, u0 + offset, 1, r + offset, 1);\n cblas_daxpy(x_inner, -1.0, w + offset, 1, r + offset, 1);\n cblas_dscal(x_inner, alpha, p + offset, 1);\n cblas_daxpy(x_inner, beta, r + offset, 1, p + offset, 1);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "IVITY)\n {\n die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = jj*x+kk;\n p[index] = 0.0;\n r[index] = 0.0;\n u[index] = energy[index]*density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n r[index] = 0.0;\n u[index] = energy[index]*density[index];\n }\n }\n\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = jj*x+kk;\n w[index] = (coefficient == CONDUCTIVITY) \n ? density[index] : 1.0/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "efficient == CONDUCTIVITY) \n ? 
density[index] : 1.0/density[index];\n }\n }\n\nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = jj*x + kk;\n kx[index] = rx*(w[index-1]+w[index]) /\n (2.0*w[index-1]*w[index]);\n ky[index] = ry*(w[index-x]+w[index]) /\n (2.0*w[index-x]*w[index]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rro_temp)", "context_chars": 100, "text": " \"n\", &m, a_non_zeros, a_row_index, a_col_index, u, w);\n\n int x_inner = x-2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_dcopy(x_inner, u + offset, 1, r + offset, 1);\n cblas_daxpy(x_inner, -1.0, w + offset, 1, r + offset, 1);\n cblas_dcopy(x_inner, r + offset, 1, p + offset, 1);\n rro_temp += cblas_ddot(x_inner, r + offset, 1, p + offset, 1);\n } #pragma omp parallel for reduction(+:rro_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pw_temp)", "context_chars": 100, "text": " \"n\", &m, a_non_zeros, a_row_index, a_col_index, p, w);\n\n int x_inner = x - 2*halo_depth;\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n int offset = jj*x + halo_depth;\n pw_temp += cblas_ddot(x_inner, w + offset, 1, p + offset, 1);\n } #pragma omp parallel for reduction(+:pw_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rrn_temp)", "context_chars": 100, "text": " double* r,\n double* w)\n{\n double rrn_temp = 0.0;\n int x_inner = x - 2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_daxpy(x_inner, alpha, p + offset, 1, u + offset, 1);\n cblas_daxpy(x_inner, -alpha, w + offset, 1, r + offset, 1);\n rrn_temp += cblas_ddot(x_inner, r + offset, 1, r + offset, 1);\n } #pragma omp parallel for reduction(+:rrn_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const double beta,\n double* p,\n double* r)\n{\n int x_inner = x - 2*halo_depth;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n const int offset = jj*x + halo_depth;\n cblas_dscal(x_inner, beta, p + offset, 1);\n cblas_daxpy(x_inner, 1.0, r + offset, 1, p + offset, 1);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int y,\n const int halo_depth,\n double theta,\n double* r,\n double* sd)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = jj*x+kk;\n sd[index] = r[index] / theta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/ppcg.c", "omp_pragma_line": "#pragma omp parallel 
for", "context_chars": 100, "text": " double* u,\n double* r,\n double* kx,\n double* ky,\n double* sd)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = jj*x+kk;\n const double smvp = SMVP(sd);\n r[index] -= smvp;\n u[index] += sd[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " smvp = SMVP(sd);\n r[index] -= smvp;\n u[index] += sd[index];\n }\n }\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = jj*x+kk;\n sd[index] = alpha*sd[index] + beta*r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth, \n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(halo_depth-kk-1)] = buffer[base+(halo_depth+kk)];\t\t\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(x-halo_depth+kk)] \n = buffer[base+(x-halo_depth-1-kk)];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth, \n double* buffer)\n{\nfor(int jj = 0; jj < depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(y-halo_depth+jj)*x] \n = buffer[base+(y-halo_depth-1-jj)*x];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/mkl/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth, \n double* buffer)\n{\nfor(int jj = 0; jj < depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(halo_depth-jj-1)*x] \n = buffer[base+(halo_depth+jj)*x];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/kernel_initialise.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n if(*a == NULL) \n {\n die(__LINE__, __FILE__, \"Error allocating buffer %s\\n\");\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n (*a)[index] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < halo_depth+depth; ++kk)\n {\n int bufIndex = (kk-halo_depth) + (jj-halo_depth)*depth;\n buffer[bufIndex] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth-depth; kk < x-halo_depth; ++kk)\n {\n int bufIndex = (kk-(x-halo_depth-depth)) + (jj-halo_depth)*depth;\n buffer[bufIndex] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = y-halo_depth-depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int bufIndex = (kk-halo_depth) + (jj-(y-halo_depth-depth))*x_inner;\n buffer[bufIndex] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = halo_depth; jj < halo_depth+depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int bufIndex = (kk-halo_depth) + (jj-halo_depth)*x_inner;\n buffer[bufIndex] = field[jj*x+kk];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth-depth; kk < halo_depth; ++kk)\n {\n int bufIndex = (kk-(halo_depth-depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[bufIndex];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int depth,\n const int halo_depth,\n double* field,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = x-halo_depth; kk < x-halo_depth+depth; ++kk)\n {\n int bufIndex = (kk-(x-halo_depth)) + (jj-halo_depth)*depth;\n field[jj*x+kk] = buffer[bufIndex];\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = y-halo_depth; jj < y-halo_depth+depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int bufIndex = (kk-halo_depth) + (jj-(y-halo_depth))*x_inner;\n field[jj*x+kk] = buffer[bufIndex];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/pack_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lo_depth,\n double* field,\n double* buffer)\n{\n const int x_inner = x-2*halo_depth;\n\nfor(int jj = halo_depth-depth; jj < halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int bufIndex = (kk-halo_depth) + (jj-(halo_depth-depth))*x_inner;\n field[jj*x+kk] = buffer[bufIndex];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "IVITY)\n {\n die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n double temp = energy[index]*density[index];\n u0[index] = temp;\n u[index] = temp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[index]*density[index];\n u0[index] = temp;\n u[index] = temp;\n }\n }\n\nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n double densityCentre = (coefficient == CONDUCTIVITY) \n ? density[index] : 1.0/density[index];\n double densityLeft = (coefficient == CONDUCTIVITY) \n ? density[index-1] : 1.0/density[index-1];\n double densityDown = (coefficient == CONDUCTIVITY) \n ? 
density[index-x] : 1.0/density[index-x];\n\n kx[index] = rx*(densityLeft+densityCentre)/(2.0*densityLeft*densityCentre);\n ky[index] = ry*(densityDown+densityCentre)/(2.0*densityDown*densityCentre);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/jacobi.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* kx,\n double* ky,\n double* u0,\n double* u,\n double* r)\n{\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n r[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/jacobi.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: err)", "context_chars": 100, "text": "const int index = kk + jj*x;\n r[index] = u[index];\t\n }\n }\n\n double err=0.0;\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n u[index] = (u0[index] \n + (kx[index+1]*r[index+1] + kx[index]*r[index-1])\n + (ky[index+x]*r[index+x] + ky[index]*r[index-x]))\n / (1.0 + (kx[index]+kx[index+1])\n + (ky[index]+ky[index+x]));\n\n err += fabs(u[index]-r[index]);\n }\n } #pragma omp parallel for reduction(+: err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int x,\n const int y,\n const int halo_depth,\n double* u0,\n double* u)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n u0[index] = u[index];\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* u,\n double* u0,\n double* r,\n double* kx,\n double* ky)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n r[index] = u0[index] - smvp;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp)", "context_chars": 100, "text": " const int halo_depth,\n double* buffer,\n double* norm)\n{\n double norm_temp = 0.0;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n norm_temp += buffer[index]*buffer[index];\t\t\t\n }\n } #pragma omp parallel for reduction(+:norm_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/solver_methods.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int halo_depth,\n double* energy,\n double* density,\n double* u)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; 
++kk)\n {\n const int index = kk + jj*x;\n energy[index] = u[index]/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t int x,\n const int y,\n const int halo_depth,\n double* u,\n double* p)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\t\n const int index = kk + jj*x;\n u[index] += p[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n double* p,\n double* r,\n double* w,\n double* kx,\n double* ky)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u0[index]-w[index];\n p[index] = r[index] / theta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cheby.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n double* p,\n double* r,\n double* w,\n double* kx,\n double* ky)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\t\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u0[index]-w[index];\n p[index] = alpha*p[index] + beta*r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "IVITY)\n {\n die(__LINE__, __FILE__, \"Coefficient %d is not valid.\\n\", coefficient);\n }\n\nfor(int jj = 0; jj < y; ++jj)\n {\n for(int kk = 0; kk < x; ++kk)\n {\n const int index = kk + jj*x;\n p[index] = 0.0;\n r[index] = 0.0;\n u[index] = energy[index]*density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n r[index] = 0.0;\n u[index] = energy[index]*density[index];\n }\n }\n\nfor(int jj = 1; jj < y-1; ++jj)\n {\n for(int kk = 1; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n w[index] = (coefficient == CONDUCTIVITY) \n ? density[index] : 1.0/density[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "efficient == CONDUCTIVITY) \n ? 
density[index] : 1.0/density[index];\n }\n }\n\nfor(int jj = halo_depth; jj < y-1; ++jj)\n {\n for(int kk = halo_depth; kk < x-1; ++kk)\n {\n const int index = kk + jj*x;\n kx[index] = rx*(w[index-1]+w[index]) /\n (2.0*w[index-1]*w[index]);\n ky[index] = ry*(w[index-x]+w[index]) /\n (2.0*w[index-x]*w[index]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rro_temp)", "context_chars": 100, "text": "w[index]) /\n (2.0*w[index-x]*w[index]);\n }\n }\n\n double rro_temp = 0.0;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(u);\n w[index] = smvp;\n r[index] = u[index]-w[index];\n p[index] = r[index];\n rro_temp += r[index]*p[index];\n }\n } #pragma omp parallel for reduction(+:rro_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pw_temp)", "context_chars": 100, "text": " double* p,\n double* w,\n double* kx,\n double* ky)\n{\n double pw_temp = 0.0;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(p);\n w[index] = smvp;\n pw_temp += w[index]*p[index];\n }\n } #pragma omp parallel for reduction(+:pw_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rrn_temp)", "context_chars": 100, "text": " double* u,\n double* p,\n double* r,\n double* w)\n{\n double rrn_temp = 0.0;\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n\n u[index] += alpha*p[index];\n r[index] -= alpha*w[index];\n rrn_temp += r[index]*r[index];\n }\n } #pragma omp parallel for reduction(+:rrn_temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "y,\n const int halo_depth,\n const double beta,\n double* p,\n double* r)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n\n p[index] = beta*p[index] + r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int y,\n const int halo_depth,\n double theta,\n double* r,\n double* sd)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n sd[index] = r[index] / theta;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* u,\n double* r,\n 
double* kx,\n double* ky,\n double* sd)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n const double smvp = SMVP(sd);\n r[index] -= smvp;\n u[index] += sd[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/ppcg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " smvp = SMVP(sd);\n r[index] -= smvp;\n u[index] += sd[index];\n }\n }\n\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n const int index = kk + jj*x;\n sd[index] = alpha*sd[index] + beta*r[index];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth, \n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(halo_depth-kk-1)] = buffer[base+(halo_depth+kk)];\t\t\t\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const int y,\n const int halo_depth,\n const int depth,\n double* buffer)\n{\nfor(int jj = halo_depth; jj < y-halo_depth; ++jj)\n {\n for(int kk = 0; kk < depth; ++kk)\n {\n int base = jj*x;\n buffer[base+(x-halo_depth+kk)] \n = buffer[base+(x-halo_depth-1-kk)];\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "th,\n const int depth, \n double* buffer)\n{\n for(int jj = 0; jj < depth; ++jj)\n {\nfor(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(y-halo_depth+jj)*x] \n = buffer[base+(y-halo_depth-1-jj)*x];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/tallendev/uvm-eval/archive/benchmarksv3/tealeaf-parallel/c_kernels/omp3/local_halos.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "th,\n const int depth, \n double* buffer)\n{\n for(int jj = 0; jj < depth; ++jj)\n {\nfor(int kk = halo_depth; kk < x-halo_depth; ++kk)\n {\n int base = kk;\n buffer[base+(halo_depth-jj-1)*x] \n = buffer[base+(halo_depth+jj)*x];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/test_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "arallel\n\tnk_vc_printf(\"I am thread %d (%d total)\\n\",omp_get_thread_num(),omp_get_num_threads());\n\t\n\nfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/openmpbench_C_v31/syncbench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "aylength);\n\t }\n\t}\n }\n}\n\nvoid testpfor() {\n int i, j;\n for (j = 0; j < 
innerreps; j++) {\nfor (i = 0; i < nthreads; i++) {\n\t delay(delaylength);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/openmpbench_C_v31/syncbench.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule (static,1)", "context_chars": 100, "text": "ck);\n\t delay(delaylength);\n\t omp_unset_lock(&lock);\n\t}\n }\n}\n\nvoid testorder() {\n int j;\nfor (j = 0; j < (int)innerreps; j++) {\n#pragma omp ordered\n\tdelay(delaylength);\n } #pragma omp parallel for ordered schedule (static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/streamcluster/streamcluster_omp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "global *lower* fields\n double* gl_lower = &work_mem[nproc*stride];\n\t\n\t// OpenMP parallelization\n//\t#pragma omp parallel for reduction(+: cost_of_opening_x)\n for ( i = k1; i < k2; i++ ) {\n float x_cost = dist(points->p[i], points->p[x], points->dim) \n * points->p[i].weight;\n float current_cost = points->p[i].cost;\n\t\t\n if ( x_cost < current_cost ) {\n\n // point i would save cost just by switching to x\n // (note that i cannot be a median, \n // or else dist(p[i], p[x]) would be 0)\t\t\t\n switch_membership[i] = 1;\n cost_of_opening_x += x_cost - current_cost;\t\t\t\n } else {\n\n // cost of assigning i to x is at least current assignment cost of i\n\n // consider the savings that i's **current** median would realize\n // if we reassigned that median and all its members to x;\n // note we've already accounted for the fact that the median\n // would save z by closing; now we have to subtract from the savings\n // the extra cost of reassigning that median and its members \n int assign = points->p[i].assign;\n lower[center_table[assign]] += current_cost - x_cost;\t\t\t\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/streamcluster/streamcluster_omp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: cost_of_opening_x)", "context_chars": 100, "text": "ble* gl_lower = &work_mem[nproc*stride];\n\t\n\t// OpenMP parallelization\n//\t#pragma omp parallel for \n\tfor ( i = k1; i < k2; i++ ) {\n float x_cost = dist(points->p[i], points->p[x], points->dim) \n * points->p[i].weight;\n float current_cost = points->p[i].cost;\n\t\t\n if ( x_cost < current_cost ) {\n\n // point i would save cost just by switching to x\n // (note that i cannot be a median, \n // or else dist(p[i], p[x]) would be 0)\t\t\t\n switch_membership[i] = 1;\n cost_of_opening_x += x_cost - current_cost;\t\t\t\n } else {\n\n // cost of assigning i to x is at least current assignment cost of i\n\n // consider the savings that i's **current** median would realize\n // if we reassigned that median and all its members to x;\n // note we've already accounted for the fact that the median\n // would save z by closing; now we have to subtract from the savings\n // the extra cost of reassigning that median and its members \n int assign = points->p[i].assign;\n lower[center_table[assign]] += current_cost - x_cost;\t\t\t\n }\n } #pragma omp parallel for reduction(+: cost_of_opening_x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HExSA-Lab/nautilus/src/test/openmp/streamcluster/streamcluster_omp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "do nothing\n\n if ( 
gl_cost_of_opening_x < 0 ) {\n // we'd save money by opening x; we'll do it\n\t\tfor ( int i = k1; i < k2; i++ ) {\n bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;\n if ( switch_membership[i] || close_center ) {\n\t\t\t\t// Either i's median (which may be i itself) is closing,\n\t\t\t\t// or i is closer to x than to its current median\n\t\t\t\tpoints->p[i].cost = points->p[i].weight *\n\t\t\t\t\tdist(points->p[i], points->p[x], points->dim);\n\t\t\t\tpoints->p[i].assign = x;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:abserr)", "context_chars": 100, "text": " transposition */\n\ndouble test_results (int order, double *B) {\n\n double abserr=0.0;\n int i,j;\n\n for (j=0;j #pragma omp parallel for private(i) reduction(+:abserr)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/DGEMM/dgemm.c", "omp_pragma_line": "#pragma omp parallel for private(i,j) ", "context_chars": 100, "text": "exit(EXIT_FAILURE);\n }\n\n ref_checksum = (0.25*forder*forder*forder*(forder-1.0)*(forder-1.0));\n\n for(j = 0; j < order; j++) for(i = 0; i < order; i++) {\n A_arr(i,j) = B_arr(i,j) = (double) j; \n C_arr(i,j) = 0.0;\n } #pragma omp parallel for private(i,j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "* intialize the input and output arrays */\n#ifdef PARALLELFOR\n #else\n #pragma omp for\n\n for (j=0; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "ndif\n for (j=0; j#else\n #pragma omp for\n\n for (j=RADIUS; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i, ii, jj)", "context_chars": 100, "text": " omp master\n#endif\n { \n stencil_time = wtime();\n }\n }\n\n#ifdef PARALLELFOR\n #else\n #pragma omp for\n\n for (j=RADIUS; j #pragma omp parallel for private(i, ii, jj)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " add constant to solution to force refresh of neighbor data, if any */\n#ifdef PARALLELFOR\n #else\n #pragma omp for\n\n for (j=0; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm), private (i)", "context_chars": 100, "text": " compute L1 norm in parallel */\n#ifdef PARALLELFOR\n #else\n #pragma omp for reduction(+:norm)\n\n for (j=RADIUS; j #pragma omp parallel for reduction(+:norm), private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i,it,jt) 
collapse(2)", "context_chars": 100, "text": " */\n istart = 0; \n\n if (tiling) {\n#ifdef COLLAPSE\n #else\n #pragma omp parallel for private (i,it,jt)\n\n for (j=0; j #pragma omp parallel for private (i,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i,it,jt)", "context_chars": 100, "text": " if (tiling) {\n#ifdef COLLAPSE\n #pragma omp parallel for private (i,it,jt) collapse(2)\n#else\n for (j=0; j #pragma omp parallel for private (i,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "t) = (double) (order*(jt+colstart) + it);\n B(it,jt) = -1.0;\n }\n }\n else {\n for (j=0;j #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " */\n istart = colstart; \n if (!tiling) {\n for (i=0; i #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt) collapse(2)", "context_chars": 100, "text": "or (j=0; j#else\n #pragma omp parallel for private (j,it,jt)\n\n for (i=0; i #pragma omp parallel for private (j,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt)", "context_chars": 100, "text": " else {\n#ifdef COLLAPSE\n #pragma omp parallel for private (j,it,jt) collapse(2)\n#else\n for (i=0; i #pragma omp parallel for private (j,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "COMM_WORLD, &recv_req); \n#endif\n\n istart = send_to*Block_order; \n if (!tiling) {\n for (i=0; i #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt) collapse(2)", "context_chars": 100, "text": "j#else\n #pragma omp parallel for private (j,it,jt)\n\n for (i=0; i #pragma omp parallel for private (j,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt)", "context_chars": 100, "text": "else {\n#ifdef COLLAPSE\n #pragma omp parallel for private (j,it,jt) collapse(2)\n#else\n for (i=0; i #pragma omp parallel for private (j,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "v_from*Block_order; \n /* scatter received block to transposed matrix; no need to tile */\n for (j=0; j #pragma omp 
parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "_time, 1, MPI_DOUBLE, MPI_MAX, root,\n MPI_COMM_WORLD);\n\n abserr = 0.0;\n istart = 0;\n for (j=0;j #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Nstream/nstream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "fset = %ld\\n\", offset);\n printf(\"Number of iterations = %d\\n\", iterations);\n }\n\n #pragma vector always\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Nstream/nstream.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "if (iter == 1) { \n MPI_Barrier(MPI_COMM_WORLD);\n local_nstream_time = wtime();\n }\n\n #pragma vector always\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Nstream/nstream.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:asum)", "context_chars": 100, "text": "0; iter<=iterations; iter++) aj += bj+scalar*cj;\n \n aj = aj * (double) (length);\n \n asum = 0.0;\n for (j=0; j epsilon) {\n printf (\"Failed Validation on output array\\n\");\n#ifndef VERBOSE\n printf (\" Expected checksum: %f \\n\",aj);\n printf (\" Observed checksum: %f \\n\",asum);\n\n return (0);\n } #pragma omp parallel for reduction(+:asum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ") (n-2*RADIUS);\n /* intialize the input and output arrays */\n for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) {\n IN(i,j) = COEFX*i+COEFY*j;\n OUT(i,j) = (DTYPE)0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " IN(i,j) = left_buf_in[kk++];\n } \n }\n\n /* Apply the stencil operator */\n for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n for (jj=-RADIUS; jj<=RADIUS; jj++) {\n OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj);\n }\n for (ii=-RADIUS; ii<0; ii++) {\n OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n }\n for (ii=1; ii<=RADIUS; ii++) {\n OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n \n }\n }\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " ii<=RADIUS; ii++) {\n OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n \n }\n }\n }\n \n /* add constant to solution to force refresh of neighbor data, if any */\n for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) IN(i,j)+= 1.0;\n \n }\n \n local_stencil_time = wtime() - local_stencil_time;\n MPI_Reduce(&local_stencil_time, &stencil_time, 1, MPI_DOUBLE, MPI_MAX, root,\n MPI_COMM_WORLD);\n \n /* compute L1 norm in parallel */\n local_norm = (DTYPE) 
0.0;\n#pragma omp parallel for reduction(+:local_norm) private (i)\n for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n local_norm += (DTYPE)ABS(OUT(i,j));\n }\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/prk/tests/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:local_norm) private (i)", "context_chars": 100, "text": "e L1 norm in parallel */\n local_norm = (DTYPE) 0.0;\nfor (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n local_norm += (DTYPE)ABS(OUT(i,j));\n }\n } #pragma omp parallel for reduction(+:local_norm) private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/GenerateProblem_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "utes the physical placement of arrays of pointers across the memory system\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i< localNumberOfRows; ++i) {\n matrixValues[i] = 0;\n matrixDiagonal[i] = 0;\n mtxIndG[i] = 0;\n mtxIndL[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/GenerateProblem_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ODO: This triply nested loop could be flattened or use nested parallelism\n#ifndef HPCG_NO_OPENMP\n for (local_int_t iz=0; izrank << \" \" << currentGlobalRow << \" \" << A.globalToLocalMap[currentGlobalRow] << endl;\n\n char numberOfNonzerosInRow = 0;\n double * currentValuePointer = matrixValues[currentLocalRow]; // Pointer to current value in current row\n global_int_t * currentIndexPointerG = mtxIndG[currentLocalRow]; // Pointer to current index in current row\n for (int sz=-1; sz<=1; sz++) {\n if (giz+sz>-1 && giz+sz-1 && giy+sy-1 && gix+sx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/SetupHalo_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "/ In the non-MPI case we simply copy global indices to local index storage\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i< localNumberOfRows; i++) {\n int cur_nnz = nonzerosInRow[i];\n for (int j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/SetupHalo_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "l ids of entry to send\n }\n }\n\n // Convert matrix indices to local IDs\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i< localNumberOfRows; i++) {\n for (int j=0; jrank==rankIdOfColumnEntry) { // My column index, so convert to local index\n mtxIndL[i][j] = A.globalToLocalMap[curIndex];\n } else { // If column index is not a row index, then it comes from another processor\n mtxIndL[i][j] = externalToLocalMap[curIndex];\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/ComputeDotProduct_ref.cpp", "omp_pragma_line": "#pragma omp parallel for reduction (+:local_result)", "context_chars": 100, "text": "0.0;\n double * xv = x.values;\n double * yv = y.values;\n if (yv==xv) {\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i #pragma omp 
parallel for reduction (+:local_result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/ComputeWAXPBY_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " const yv = y.values;\n\tdouble * const wv = w.values;\n if (alpha==1.0) {\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/ComputeWAXPBY_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t i=0; ifor (local_int_t i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/ComputeSPMV_ref.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble * const yv = y.values;\n const local_int_t nrow = A.localNumberOfRows;\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i< nrow; i++) {\n double sum = 0.0;\n const double * const cur_vals = A.matrixValues[i];\n const local_int_t * const cur_inds = A.mtxIndL[i];\n const int cur_nnz = A.nonzerosInRow[i];\n\n for (int j=0; j< cur_nnz; j++)\n sum += cur_vals[j]*xv[cur_inds[j]];\n yv[i] = sum;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/GenerateCoarseProblem.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "utes the physical placement of arrays of pointers across the memory system\n#ifndef HPCG_NO_OPENMP\n for (local_int_t i=0; i< localNumberOfRows; ++i) {\n f2cOperator[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/GenerateCoarseProblem.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ODO: This triply nested loop could be flattened or use nested parallelism\n#ifndef HPCG_NO_OPENMP\n for (local_int_t izc=0; izc #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/hpcg/src/CheckProblem.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ODO: This triply nested loop could be flattened or use nested parallelism\n#ifndef HPCG_NO_OPENMP\n for (local_int_t iz=0; izrank << \" \" << currentGlobalRow << \" \" << A.globalToLocalMap[currentGlobalRow] << endl;\n\n char numberOfNonzerosInRow = 0;\n double * currentValuePointer = A.matrixValues[currentLocalRow]; // Pointer to current value in current row\n global_int_t * currentIndexPointerG = A.mtxIndG[currentLocalRow]; // Pointer to current index in current row\n for (int sz=-1; sz<=1; sz++) {\n if (giz+sz>-1 && giz+sz-1 && giy+sy-1 && gix+sx #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/perform_element_loop.hpp", "omp_pragma_line": "#pragma omp parallel for shared (elemIDs)", "context_chars": 100, "text": "= 0, t_si = 0;\n timer_type t0 = 0;\n\n const MINIFE_GLOBAL_ORDINAL elemID_size = elemIDs.size();\n\n for(MINIFE_GLOBAL_ORDINAL i=0; i < elemID_size; ++i) {\n ElemData elem_data;\n compute_gradient_values(elem_data.grad_vals);\n\n get_elem_nodes_and_coords(mesh, elemIDs[i], elem_data);\n compute_element_matrix_and_vector(elem_data);\n sum_into_global_linear_system(elem_data, A, b);\n } #pragma omp parallel for shared (elemIDs)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/generate_matrix_structure.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ffset_ptr = &row_offsets[0];\n MINIFE_LOCAL_ORDINAL* const row_coords_ptr = &row_coords[0];\n\n\tfor(int r = 0; r < r_n; ++r) {\n\t\tint iz = r / (xy_width) + box[2][0];\n\t\tint iy = (r / x_width) % y_width + box[1][0];\n\t\tint ix = r % x_width + box[0][0];\n\n \tGlobalOrdinal row_id =\n \tget_id(global_nodes_x, global_nodes_y, global_nodes_z,\n \tix, iy, iz);\n \trow_ptr[r] = mesh.map_id_to_row(row_id);\n \trow_coords_ptr[r*3] = ix;\n \trow_coords_ptr[r*3+1] = iy;\n row_coords_ptr[r*3+2] = iz;\n\n\t\t\tMINIFE_LOCAL_ORDINAL nnz = 0;\n for(int sz=-1; sz<=1; ++sz) {\n \tfor(int sy=-1; sy<=1; ++sy) {\n \tfor(int sx=-1; sx<=1; ++sx) {\n \tGlobalOrdinal col_id =\nget_id(global_nodes_x, global_nodes_y, global_nodes_z,\n\t ix+sx, iy+sy, iz+sz);\n\n \tif (col_id >= 0 && col_id < global_nrows) {\n \t++nnz;\n \t}\n \t}\n \t}\n \t}\n \trow_offset_ptr[r+1] = nnz;\n\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/exchange_externals.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".size();\n#ifdef MINIFE_DEBUG\n os << \"total_to_be_sent: \" << total_to_be_sent << std::endl;\n#endif\n\nfor(size_t i=0; i x.local_size) {\n os << \"error, out-of-range. x.coefs.size()==\"< #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "T wcoefs __attribute__ ((aligned (64))) = &w.coefs[0];\n\n if(beta == 0.0) {\n\tif(alpha == 1.0) {\n \t\t#pragma vector nontemporal\n\t\t#pragma unroll(8)\n \t\tfor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t#pragma unroll(8)\n \t\tfor(int i=0; i#pragma vector nontemporal\n\t\t#pragma unroll(8)\n \t\tfor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "=0; i#pragma vector nontemporal\n\t\t#pragma unroll(8)\n \t\tfor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \t\tfor(int i=0; i#pragma vector nontemporal\n\t\t#pragma unroll(8)\n \t\tfor(int i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "STRICT ycoefs __attribute__ ((aligned (64))) = &y.coefs[0];\n\n if(alpha == 1.0 && beta == 1.0) {\n\t #pragma vector nontemporal\n\t #pragma unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += xcoefs[i];\n \t }\n } else if (beta == 1.0) {\n\t #pragma vector nontemporal\n\t #pragma unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] += alpha * xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt i = 0; i < n; ++i) {\n\t ycoefs[i] += alpha * xcoefs[i];\n \t }\n } else if (alpha == 1.0) {\n\t #pragma vector nontemporal\n\t #pragma unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = xcoefs[i] + beta * ycoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " i < n; ++i) {\n\t ycoefs[i] = xcoefs[i] + beta * ycoefs[i];\n \t }\n } else if (beta == 0.0) {\n\t #pragma vector nontemporal\n\t #pragma unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i];\n \t }\n } else {\n\t #pragma vector nontemporal\n\t #pragma unroll(8)\n\t for(int i = 0; i < n; ++i) {\n\t ycoefs[i] = alpha * xcoefs[i] + beta * ycoefs[i];\n \t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:result)", "context_chars": 100, "text": "INIFE_RESTRICT ycoefs __attribute__ ((aligned (64))) = &y.coefs[0];\n\n MINIFE_SCALAR result = 0;\n\n for(int i=0; i #pragma omp parallel for reduction(+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector_functions.hpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:result)", "context_chars": 100, "text": "MINIFE_RESTRICT xcoefs __attribute__ ((aligned (64))) = &x.coefs[0];\n MINIFE_SCALAR result = 0;\n\n #pragma unroll(8)\n for(MINIFE_LOCAL_ORDINAL i = 0; i < n; ++i) {\n \tresult += xcoefs[i] * xcoefs[i];\n } #pragma omp parallel for reduction(+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/CSRMatrix.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rows * ncols_per_row;\n packed_cols.reserve(nrows_max);\n packed_coefs.reserve(nrows_max);\n\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows; ++i) {\n\trows[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/CSRMatrix.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ma omp parallel for\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows; ++i) {\n\trows[i] = 0;\n }\n\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows + 1; ++i) {\n\trow_offsets[i] = 
0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/CSRMatrix.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "llel for\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows + 1; ++i) {\n\trow_offsets[i] = 0;\n }\n\n for(MINIFE_GLOBAL_ORDINAL i = 0; i < nrows_max; ++i) {\n\tpacked_cols[i] = 0;\n\tpacked_coefs[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n zero_row_and_put_1_on_diagonal(A, row);\n }\n }\n\n const int ROW_COUNT = A.rows.size();\n\n for(MINIFE_GLOBAL_ORDINAL i=0; i < ROW_COUNT; ++i) {\n GlobalOrdinal row = A.rows[i];\n\n if (bc_rows.find(row) != bc_rows.end()) continue;\n\n size_t row_length = 0;\n GlobalOrdinal* cols = NULL;\n Scalar* coefs = NULL;\n A.get_row_pointers(row, row_length, cols, coefs);\n\n Scalar sum = 0;\n for(size_t j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "4))) = &x.coefs[0];\n ScalarType* ycoefs __attribute__((aligned(64))) = &y.coefs[0];\n\n for(MINIFE_GLOBAL_ORDINAL row = 0; row < rows_size; ++row) {\n const MINIFE_GLOBAL_ORDINAL row_start = Arowoffsets[row];\n const MINIFE_GLOBAL_ORDINAL row_end = Arowoffsets[row+1];\n\n MINIFE_SCALAR sum = 0;\n\n #pragma loop_count(15)\n\t\t#pragma vector nontemporal\n for(MINIFE_GLOBAL_ORDINAL i = row_start; i < row_end; ++i) {\n sum += Acoefs[i] * xcoefs[Acols[i]];\n }\n\n ycoefs[row] = sum;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "alarType* xcoefs = &x.coefs[0];\n ScalarType* ycoefs = &y.coefs[0];\n ScalarType beta = 0;\n\n for(int row=0; row #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = &y.coefs[0];\n const ScalarType beta = 0;\n\n for(MINIFE_GLOBAL_ORDINAL row = 0; row < rows_size; ++row) {\n const MINIFE_GLOBAL_ORDINAL row_start = Arowoffsets[row];\n const MINIFE_GLOBAL_ORDINAL row_end = Arowoffsets[row+1];\n\n MINIFE_SCALAR sum = 0;\n\n #pragma loop_count(15)\n for(MINIFE_GLOBAL_ORDINAL i = row_start; i < row_end; ++i) {\n sum += Acoefs[i] * xcoefs[Acols[i]];\n }\n\n ycoefs[row] = sum;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/SparseMatrix_functions.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "alarType* xcoefs = &x.coefs[0];\n ScalarType* ycoefs = &y.coefs[0];\n ScalarType beta = 0;\n\n for(int row=0; row #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/main.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:value)", "context_chars": 100, "text": "PENMP\n const int thread_count = omp_get_max_threads();\n#else\n const int 
thread_count = 1;\n#endif\n\nfor(int i = 0; i < thread_count; ++i) {\n\tvalue += 1;\n } #pragma omp parallel for reduction(+:value)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openhpc/ohpc/tests/apps/miniFE/tests/src/Vector.hpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int )coefs) % 64);\n }\n\n const MINIFE_LOCAL_ORDINAL n = (MINIFE_LOCAL_ORDINAL) local_sz;\n\n for(MINIFE_LOCAL_ORDINAL i = 0; i < n; ++i) {\n\tcoefs[i] = 0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/yzhaiustc/Optimizing-DGEMV-on-Intel-CPUs/mydgemv.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": " i++;\n }\n\n}\n\nvoid mydgemv_t(double *A, double *x, double *y, int m, int n)\n{\n int tid;\nfor (tid = 0; tid < omp_get_num_threads(); tid++)\n {\n int TOTAL_THREADS = omp_get_num_threads();\n long int NUM_DIV_NUM_THREADS = m / TOTAL_THREADS * TOTAL_THREADS;\n long int DIM_LEN = m / TOTAL_THREADS;\n long int EDGE_LEN = (NUM_DIV_NUM_THREADS == m) ? m / TOTAL_THREADS : m - NUM_DIV_NUM_THREADS + DIM_LEN;\n if (tid == 0)\n mydgemv_compute(A,x,y,EDGE_LEN,n);\n else\n mydgemv_compute(A+EDGE_LEN*n + (tid - 1) * DIM_LEN*n,x,y + EDGE_LEN + (tid - 1) * DIM_LEN,DIM_LEN,n);\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/dlb/doc/examples/mpi+omp_ompt/mpi_omp_pils.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+:iter_time) shared(fib)", "context_chars": 100, "text": " ifor(j=i; j #pragma omp parallel for private(j) reduction(+:iter_time) shared(fib)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/dlb/doc/examples/mpi+omp/mpi_omp_pils.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+:iter_time) shared(fib)", "context_chars": 100, "text": " int upper_bound = MIN(steps, i+BS);\n\n int j;\n DLB_Borrow();\n for(j=i; j #pragma omp parallel for private(j) reduction(+:iter_time) shared(fib)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/src/tl/omp/core/tl-omp-reduction.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(A::F : A::d)", "context_chars": 100, "text": "argument is special, we have to look for a ':' that is not followed by any other ':'\n // std::string current_argument = *list_it;\n\n // Trim blanks\n current_argument.erase(std::remove(current_argument.begin(), current_argument.end(), ' '), current_argument.end());\n\n std::string::iterator split_colon = current_argument.end();\n for (std::string::iterator it = current_argument.begin();\n it != current_argument.end();\n it++)\n {\n if ((*it) == ':'\n && (it + 1) != current_argument.end())\n {\n if (*(it + 1) != ':')\n {\n split_colon = it;\n break;\n }\n else\n {\n // Next one is also a ':' but it is not a valid splitting\n // ':', so ignore it\n it++;\n }\n }\n } #pragma omp parallel for reduction(A::F : A::d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/src/tl/omp/core/tl-omp-core.cpp", "omp_pragma_line": "#pragma omp parallel for'\\n\");", "context_chars": 100, "text": " error_printf_at(n.get_locus(), \"invalid 'break' inside '#pragma omp for' or '}\n }\n else if (IS_FORTRAN_LANGUAGE)\n {\n if (n.get_construct_name().is_null() && nest_of_break == 0)\n {\n error_printf_at(n.get_locus(), \"invalid 'EXIT' inside '!$OMP DO' or '!$OMP PARALLEL DO'\\n\");\n }\n else 
if (!n.get_construct_name().is_null())\n {\n TL::Symbol name = n.get_construct_name().get_symbol();\n if ((name == loop_label)\n || !stack_of_labels.contains(name))\n {\n // We are doing an EXIT of the whole loop or a loop not nested\n error_printf_at(n.get_locus(), \"invalid 'EXIT' inside '!$OMP DO' or '!$OMP PARALLEL DO'\\n\");\n }\n }\n } #pragma omp parallel for'\\n\");"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_34_parallel_convolv_overlap_epilog.c", "omp_pragma_line": "#pragma omp parallel for default(none) \\", "context_chars": 100, "text": "ge, 64);\n __assume_aligned(filter, 64);\n\n __assume((W%16) == 0);\n __assume((H%16) == 0);\n\n\nprivate(mv, mh, i, j) \\\n firstprivate(input_image, output_image, filter, H, W, K, normal_factor) \\\n schedule(static) num_threads(183)\n for (mv = 0; mv < W-K; mv++)\n {\t\t\t\t/* rows, top-to-bottom */\n for (mh = 0; mh < H-K; mh++)\n {\t\t\t/* columns, LEFT-to-RIGHT */\n int sum = 0;\n for (i = 0; i < K; i++)\n {\n for (j = 0; j < K; j++)\n {\t\t/* FIR outer-loop... */\n sum += input_image[(mh + i)*W + mv + j] * filter[i*K + j];\n } \n }\n output_image[mh * W + mv] = (sum >> normal_factor);\n }\n } #pragma omp parallel for default(none) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_34_parallel_convolv_overlap_epilog.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "id **) &filter,\n VECTOR_SIZE, K*K * sizeof(int)) != 0)\n {\n exit(1);\n }\n\nfor (i = 0; i < H; i++)\n {\n for (j = 0; j < W; ++j)\n {\n input_image[i * W + j] = i + j;\n output_image[i * W + j] = 0;\n }\n } #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_34_parallel_convolv_overlap_epilog.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "utput_image_sc,\n VECTOR_SIZE, H*W * sizeof(int)) != 0)\n {\n exit(1);\n }\n\nfor (i = 0; i < H; i++)\n {\n for (j = 0; j < W; ++j)\n {\n output_image_sc[i * W + j] = 0;\n }\n } #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_33_parallel_convolv_overlap.c", "omp_pragma_line": "#pragma omp parallel for default(none) \\", "context_chars": 100, "text": "ge, 64);\n __assume_aligned(filter, 64);\n\n __assume((W%16) == 0);\n __assume((H%16) == 0);\n\n\nprivate(mv, mh, i, j) \\\n firstprivate(input_image, output_image, filter, H, W, K, normal_factor) \\\n schedule(static) num_threads(183)\n for (mv = 0; mv < W-K; mv++)\n {\t\t\t\t/* rows, top-to-bottom */\n for (mh = 0; mh < H-K; mh++)\n {\t\t\t/* columns, LEFT-to-RIGHT */\n int sum = 0;\n for (i = 0; i < K; i++)\n {\n for (j = 0; j < K; j++)\n {\t\t/* FIR outer-loop... 
*/\n sum += input_image[(mh + i)*W + mv + j] * filter[i*K + j];\n } \n }\n output_image[mh * W + mv] = (sum >> normal_factor);\n }\n } #pragma omp parallel for default(none) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_33_parallel_convolv_overlap.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "id **) &filter,\n VECTOR_SIZE, K*K * sizeof(int)) != 0)\n {\n exit(1);\n }\n\nfor (i = 0; i < H; i++)\n {\n for (j = 0; j < W; ++j)\n {\n input_image[i * W + j] = i + j;\n output_image[i * W + j] = 0;\n }\n } #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_33_parallel_convolv_overlap.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "utput_image_sc,\n VECTOR_SIZE, H*W * sizeof(int)) != 0)\n {\n exit(1);\n }\n\nfor (i = 0; i < H; i++)\n {\n for (j = 0; j < W; ++j)\n {\n output_image_sc[i * W + j] = 0;\n }\n } #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_32_parallel_moving_average_overlap.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(N) private(i)", "context_chars": 100, "text": "_memalign((void **) &result_sc, VECTOR_SIZE, N * sizeof(float)) != 0)\n {\n exit(1);\n }\n\nfor (i=0; i #pragma omp parallel for firstprivate(N) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_32_parallel_moving_average_overlap.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(N, points, input, result, result_sc)", "context_chars": 100, "text": "\n fprintf(stderr, \"%.10f\\n\", result[N-1]);\n\n moving_average_sc(input, result_sc, N, points);\n\nfor(i=0; i 5)\n {\n printf(\"ERROR (%i) %.20f != %.20f\\n\", i, result[i], result_sc[i]);\n exit(1);\n }\n } #pragma omp parallel for firstprivate(N, points, input, result, result_sc)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/simd/mic/success_simd_35_parallel_heat_eq.c", "omp_pragma_line": "#pragma omp parallel for private(k, j, i) firstprivate(Anext, Anext_sc, nx) shared(stderr, errors) schedule(static) num_threads(183) default(none)", "context_chars": 100, "text": "_sc);\n \n\n /* stencil function */ \n stencil_14_sc(A0_sc, Anext_sc, nx, \n tx, timesteps);\n\nfor (i=0; i 0.001f)\n {\n fprintf(stderr, \"Error: abs(Anext[%d]=%f - Anext_sc[%d]=%f) = %f\\n\",\n i,\n Anext[i],\n i,\n Anext_sc[i],\n abs);\n\n errors = 1;\n }\n } #pragma omp parallel for private(k, j, i) firstprivate(Anext, Anext_sc, nx) shared(stderr, errors) schedule(static) num_threads(183) default(none)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_02.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(A::plus : x)", "context_chars": 100, "text": "[N];\n A::myInt x = {0};\n\n for ( i = 0; i < N ; i++ ) {\n a[i] = i;\n s += i;\n }\n\n for ( i = 0; i < N ; i++ )\n {\n x += a[i];\n } #pragma omp parallel for reduction(A::plus : x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_12.cpp", "omp_pragma_line": "#pragma omp parallel 
for reduction (foo : x)", "context_chars": 100, "text": "omp_out, &omp_in) )\n\nint main (int argc, char* argv[])\n{\n std::vector v(10,1);\n int x;\n\n for (int i=0; i<10; i++)\n {\n x += v[i];\n } #pragma omp parallel for reduction (foo : x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_01.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:x)", "context_chars": 100, "text": "\n int a[N];\n myInt x;\n\n for ( i = 0; i < N ; i++ ) {\n a[i] = i;\n s += i;\n }\n\n for ( i = 0; i < N ; i++ )\n {\n x += a[i];\n } #pragma omp parallel for reduction(+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_13.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(merge : v2)", "context_chars": 100, "text": " ) )\n\nint main (int argc, char* argv[])\n{\n std::vector v1(5);\n std::vector v2(5);\n\n for (int i=0; i<5; i++)\n {\n v1 = v2;\n } #pragma omp parallel for reduction(merge : v2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_for_04.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "fo>\n*/\n#include \n\ntemplate \nvoid f(T* t, T lower, T length, T val)\n{\n T i;\nfor (i = lower; i < (lower + length); i++)\n {\n t[i] = val + i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_07.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(A::plus:x)", "context_chars": 100, "text": " int a[N];\n A::myInt x;\n\n for ( i = 0; i < N ; i++ ) {\n a[i] = i;\n s += i;\n }\n\n for ( i = 0; i < N ; i++ )\n {\n x += a[i];\n } #pragma omp parallel for reduction(A::plus:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_collapse_01.cpp", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "][j] == 1);\n}\n\n\nint main(int argc, char* argv[])\n{\n int v[N][N];\n\n {\n init(v);\n for(int i = 0; i < N; ++i)\n for(int j = 0; j < N; ++j)\n v[i][j] += 1;\n check(v);\n }\n\n {\n init(v);\n #pragma omp parallel\n {\n #pragma omp for collapse(2)\n for(int i = 0; i < N; ++i)\n for(int j = 0; j < N; ++j)\n v[i][j] += 1;\n }\n check(v);\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_08.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(myInt::operator+ :x)", "context_chars": 100, "text": " int a[N];\n myInt x;\n\n for ( i = 0; i < N ; i++ ) {\n a[i] = i;\n s += i;\n }\n\n for ( i = 0; i < N ; i++ )\n {\n x += a[i];\n } #pragma omp parallel for reduction(myInt::operator+ :x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_for_03.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " c[i] = i;\n }\n\n for (i=0; i < x + y; i = i + 2)\n {\n if (c[i] != i) abort();\n }\n\nfor (i=0; i < x + y; i = i + 2)\n {\n c[i] = 2*i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/cxx/success_new_udr_20.cpp", "omp_pragma_line": "#pragma omp parallel for 
reduction(plus : sum)", "context_chars": 100, "text": "zer(init(&omp_priv))\n\nmy_data_t foo(my_data_t* v, int n)\n{\n my_data_t sum;\n sum.my_data = 0;\n\nfor (int i = 0; i < n; i++)\n {\n reducer(&sum, &v[i]);\n } #pragma omp parallel for reduction(plus : sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_for_10.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a, b) lastprivate(a, b)", "context_chars": 100, "text": "char *argv[])\n{\n int b[10];\n int i;\n\n a[1] = 3;\n a[2] = 2;\n b[4] = 4;\n b[5] = 5;\n\nfor (i = 0; i < 10; i++)\n {\n a[1] = a[2];\n b[4] = b[5];\n } #pragma omp parallel for firstprivate(a, b) lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_03.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n #pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_for_05.c", "omp_pragma_line": "#pragma omp parallel for lastprivate(a, b)", "context_chars": 100, "text": "\n\nint a;\n\nint main(int argc, char *argv[])\n{\n int b;\n int i;\n\n a = 3;\n b = 4;\n\nfor (i = 0; i < 10; i++)\n {\n a = i;\n b = i + 1;\n } #pragma omp parallel for lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_parallel_for_04.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "estinfo>\n*/\n\n#ifndef __ICC\n\n#include \n\nvoid f(int N1, int N2, int a[N1][N2])\n{\n int i;\nfor (i = 0; i < N1; i++)\n {\n int j;\n for (j = 0; j < N2; j++)\n {\n a[i][j] = i - j;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_05.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n #pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? 
a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_array_reduction_01.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) reduction(+:[ARRAY_SIZE]result)", "context_chars": 100, "text": "Y_SIZE], int *result)\n{\n int i;\n for (i = 0; i < ARRAY_SIZE; i++)\n {\n result[i] = 0;\n }\n\n for (i = 0; i < NUM_ITEMS; i++)\n {\n int j;\n for (j = 0; j < ARRAY_SIZE; j++)\n {\n result[j] += a[i][j];\n }\n } #pragma omp parallel for firstprivate(a) reduction(+:[ARRAY_SIZE]result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_07.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x)", "context_chars": 100, "text": "\n struct myInt x = {0};\n\n for ( i = 0; i < N ; i++ ) {\n a[i] = i;\n s += i;\n }\n\n for ( i = 0; i < N ; i++ )\n {\n x.x += a[i];\n } #pragma omp parallel for reduction(+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_04.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n #pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_array_reduction_02.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) reduction(+:[array_size]result)", "context_chars": 100, "text": "y_size], int *result)\n{\n int i;\n for (i = 0; i < array_size; i++)\n {\n result[i] = 0;\n }\n\n for (i = 0; i < NUM_ITEMS; i++)\n {\n int j;\n for (j = 0; j < array_size; j++)\n {\n result[j] += a[i][j];\n }\n } #pragma omp parallel for firstprivate(a) reduction(+:[array_size]result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_02.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n #pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/failure_parallel_for_01.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i)", "context_chars": 100, "text": "fig/mercurium-omp\ntest_compile_fail=yes\ntest_nolink=yes\n\n*/\n\nint main()\n{\n int i;\n for (i = 0; i < 100; ++i)\n {\n } #pragma omp parallel for firstprivate(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_01.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "\n\n for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n#pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? 
a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_for_06.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a, b) lastprivate(a, b)", "context_chars": 100, "text": "e \n\nint a;\nint b;\n\nint main(int argc, char *argv[])\n{\n int i;\n\n a = -3;\n b = -4;\n\nfor (i = 0; i < 100; i++)\n {\n if (a < 0) if (a != -3) abort();\n if (b < 0) if (b != -4) abort();\n\n a = i;\n b = i;\n } #pragma omp parallel for firstprivate(a, b) lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_vla_04.c", "omp_pragma_line": "#pragma omp parallel for shared(v)", "context_chars": 100, "text": "[j]: %d\\n\", v[i][j]);\n assert(v[i][j] == 0);\n v[i][j]++;\n }\n }\n\n for (int i = 0; i < n; ++i)\n {\n for (int j = 0; j < n; ++j)\n {\n// printf(\"2. v[i][j]: %d\\n\", v[i][j]);\n assert(v[i][j] == 0);\n v[i][j]++;\n }\n } #pragma omp parallel for shared(v)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_vla_04.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(v)", "context_chars": 100, "text": "[j]: %d\\n\", v[i][j]);\n assert(v[i][j] == 0);\n v[i][j]++;\n }\n }\n\n for (int i = 0; i < n; ++i)\n {\n for (int j = 0; j < n; ++j)\n {\n// printf(\"3. v[i][j]: %d\\n\", v[i][j]);\n assert(v[i][j] == 1);\n v[i][j]++;\n }\n } #pragma omp parallel for firstprivate(v)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_collapse_01.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "\n\nint main(int argc, char* argv[])\n{\n int v[N][N];\n int i, j;\n\n {\n init(v);\n for(i = 0; i < N; ++i)\n for(j = 0; j < N; ++j)\n v[i][j] += 1;\n check(v);\n }\n\n {\n init(v);\n #pragma omp parallel\n {\n #pragma omp for collapse(2)\n for(i = 0; i < N; ++i)\n for(j = 0; j < N; ++j)\n v[i][j] += 1;\n }\n check(v);\n } #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_parallel_for_03.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "ude \n\n#define NUM_ELEMS 100\n\nint main(int argc, char *argv[])\n{\n int i;\n int s = 0;\n\nfor (i = 0; i < NUM_ELEMS; i++)\n {\n s = s + i;\n } #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_06.c", "omp_pragma_line": "#pragma omp parallel for reduction(mymin:x)", "context_chars": 100, "text": "for ( i = 0; i < N ; i++ ) a[i] = i;\n\n#ifdef NANOX\n #pragma omp for reduction(mymin:x)\n#else\n for ( i = 0; i < N ; i++ )\n {\n x = a[i] < x ? 
a[i] : x;\n } #pragma omp parallel for reduction(mymin:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_parallel_for_02.c", "omp_pragma_line": "#pragma omp parallel for private(x)", "context_chars": 100, "text": "lude \n\n#define NUM_ELEMS 1000\n\nint main(int argc, char *argv[])\n{\n int c[NUM_ELEMS], x;\nfor (int i = 0; i < NUM_ELEMS; i++)\n {\n x = i;\n c[i] = i;\n } #pragma omp parallel for private(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_task_03.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dlib.h>\n#define NUM_ELEMS 100\n\nint main(int argc, char* argv[])\n{\n int i;\n int c[NUM_ELEMS];\n\nfor (i = 0; i < NUM_ELEMS; i++)\n {\n int *p = &(c[i]);\n#pragma omp task firstprivate(p, i)\n {\n *p = i;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_new_udr_08.c", "omp_pragma_line": "#pragma omp parallel for reduction(complex_add:x) reduction(complex_mul:y)", "context_chars": 100, "text": ")\n\n#define N 100\nmy_complex_t vector[N];\n\nint foo(my_complex_t x, my_complex_t y)\n{\n int i;\n\n for ( i = 0; i < N ; i++ ) \n {\n x = complex_add(x,vector[i]);\n y = complex_mul(y,vector[i]);\n } #pragma omp parallel for reduction(complex_add:x) reduction(complex_mul:y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_for_16.c", "omp_pragma_line": "#pragma omp parallel for default(none)", "context_chars": 100, "text": "nt argc, char* argv[])\n{\n int i;\n#pragma omp for default(none)\n for(i=0;i<8;i++)\n {\n }\n\nfor(i=0;i<8;i++)\n {\n } #pragma omp parallel for default(none)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_omp.dg/common/c/success_parallel_for_01.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n#define NUM_ELEMS 1000\n\nint main(int argc, char *argv[])\n{\n int i = -1;\n int c[NUM_ELEMS];\n\nfor (i = 0; i < NUM_ELEMS; i++)\n {\n c[i] = i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/cxx/success_task_13.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:result)", "context_chars": 100, "text": "6_imcxx=yes\n\n*/\ntemplate < typename T>\nint foo(T* x, T* y, int n)\n{\n int result = 0;\nfor (int i = 0; i < n; ++i)\n {\n result += x[i] + y[i];\n } #pragma omp parallel for reduction(+:result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/cxx/success_parallel_for_slicer_01.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "nclude \n#include \n\nint main(int argc, char *argv[])\n{\n int i;\n int s = 0;\n\nfor (i = 0; i < 100; i++)\n {\n s += i;\n } #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/cxx/success_for_01.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "fo>\n*/\n#include \n\ntemplate \nvoid f(T* t, T lower, T length, T val)\n{\n T i;\nfor (i = lower; i < (lower + length); i++)\n {\n t[i] = val + i;\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/c/success_for_slicer_03.c", "omp_pragma_line": "#pragma omp parallel for lastprivate(a, b)", "context_chars": 100, "text": "\n\nint a;\n\nint main(int argc, char *argv[])\n{\n int b;\n int i;\n\n a = 3;\n b = 4;\n\nfor (i = 0; i < 10; i++)\n {\n a = i;\n b = i + 1;\n } #pragma omp parallel for lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/c/success_for_slicer_04.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a, b) lastprivate(a, b)", "context_chars": 100, "text": "e \n\nint a;\nint b;\n\nint main(int argc, char *argv[])\n{\n int i;\n\n a = -3;\n b = -4;\n\nfor (i = 0; i < 100; i++)\n {\n if (a < 0) if (a != -3) abort();\n if (b < 0) if (b != -4) abort();\n\n a = i;\n b = i;\n } #pragma omp parallel for firstprivate(a, b) lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/c/success_for_slicer_05.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a, b) lastprivate(a, b)", "context_chars": 100, "text": "char *argv[])\n{\n int b[10];\n int i;\n\n a[1] = 3;\n a[2] = 2;\n b[4] = 4;\n b[5] = 5;\n\nfor (i = 0; i < 10; i++)\n {\n a[1] = a[2];\n b[4] = b[5];\n } #pragma omp parallel for firstprivate(a, b) lastprivate(a, b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/c/success_array_reductions_01.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) reduction(+:[array_size]result)", "context_chars": 100, "text": "y_size], int *result)\n{\n int i;\n for (i = 0; i < array_size; i++)\n {\n result[i] = 0;\n }\n\n for (i = 0; i < NUM_ITEMS; i++)\n {\n int j;\n for (j = 0; j < array_size; j++)\n {\n result[j] += a[i][j];\n }\n } #pragma omp parallel for firstprivate(a) reduction(+:[array_size]result)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/07_phases_ompss.dg/c/success_parallel_for_slicer_01.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:s)", "context_chars": 100, "text": "nclude \n#include \n\nint main(int argc, char *argv[])\n{\n int i;\n int s = 0;\n\nfor (i = 0; i < 100; i++)\n {\n s += i;\n } #pragma omp parallel for reduction(+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/02_typecalc_cxx.dg/success_650.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rator=config/mercurium\ntest_CXXFLAGS=\"-fopenmp\"\n\n*/\n#include \n\nvoid foo(int N)\n{\nfor (int i = 0; i < N; i++)\n {\n printf(\"%d\\n\", i);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/bsc-pm/mcxx/tests/02_typecalc_cxx.dg/success_650.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "id, but also checks that we convert 'int i(0)' into\n// 'int i = 0' in this case.\nvoid foo2(int N)\n{\nfor (int i(0); i < N; i++)\n {\n printf(\"%d\\n\", i);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-ua-target.c", "omp_pragma_line": "#pragma omp parallel for simd schedule(static)", "context_chars": 100, "text": "loc(bytes, host);\n double * restrict C = omp_target_alloc(bytes, host);\n\n double scalar = 3.0;\n\n for (size_t i=0; i #pragma omp parallel for simd schedule(static)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-ua-target.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:asum)", "context_chars": 100, "text": "0; i<=iterations; i++) {\n ar += br + scalar * cr;\n }\n\n ar *= length;\n\n double asum = 0.0;\n for (size_t i=0; i #pragma omp parallel for reduction(+:asum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-target.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "strict B = prk_malloc(bytes);\n double * restrict C = prk_malloc(bytes);\n\n double scalar = 3.0;\n\n for (size_t i=0; i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-target.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:asum)", "context_chars": 100, "text": "0; i<=iterations; i++) {\n ar += br + scalar * cr;\n }\n\n ar *= length;\n\n double asum = 0.0;\n for (size_t i=0; i #pragma omp parallel for reduction(+:asum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-memcpy-target.c", "omp_pragma_line": "#pragma omp parallel for simd schedule(static)", "context_chars": 100, "text": "c(bytes, host);\n double * restrict h_C = omp_target_alloc(bytes, host);\n\n double scalar = 3.0;\n\n for (size_t i=0; i #pragma omp parallel for simd schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/C1z/nstream-memcpy-target.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:asum)", "context_chars": 100, "text": "0; i<=iterations; i++) {\n ar += br + scalar * cr;\n }\n\n ar *= length;\n\n double asum = 0.0;\n for (size_t i=0; i #pragma omp parallel for reduction(+:asum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/PIC/pic.c", "omp_pragma_line": "#pragma omp parallel for private(i, p, fx, fy, ax, ay)", "context_chars": 100, "text": "{\n pic_time = wtime();\n }\n\n /* Calculate forces on particles and update positions */\n for (i=0; i #pragma omp parallel for private(i, p, fx, fy, ax, ay)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:abserr)", "context_chars": 100, "text": "bserr=0.0;\n size_t i, j;\n\n double addit = ((double)(iterations+1) * (double) (iterations))/2.0;\n for (j=0;j #pragma omp parallel for reduction(+:abserr)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/DGEMM/dgemm.c", "omp_pragma_line": "#pragma omp parallel for private(i,j) ", "context_chars": 100, "text": "exit(EXIT_FAILURE);\n }\n\n ref_checksum = (0.25*forder*forder*forder*(forder-1.0)*(forder-1.0));\n\n for(j = 0; j < order; j++) for(i = 0; i < order; i++) {\n A_arr(i,j) = B_arr(i,j) = (double) j; \n C_arr(i,j) = 0.0;\n } #pragma omp parallel for private(i,j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " /* intialize the input and output arrays */\n#if PARALLELFOR\n #else\n #pragma omp for\n\n for (j=0; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Stencil/stencil.c", "omp_pragma_line": 
"#pragma omp parallel for private(i)", "context_chars": 100, "text": "r\n#endif\n for (j=0; j#else\n #pragma omp for\n\n for (j=RADIUS; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i, ii, jj)", "context_chars": 100, "text": "pragma omp master\n#endif\n {\n stencil_time = wtime();\n }\n }\n\n#if PARALLELFOR\n #else\n #pragma omp for\n\n for (j=RADIUS; j #pragma omp parallel for private(i, ii, jj)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " /* add constant to solution to force refresh of neighbor data, if any */\n#if PARALLELFOR\n #else\n #pragma omp for\n\n for (j=0; j #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/OPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm), private (i)", "context_chars": 100, "text": " /* compute L1 norm in parallel */\n#if PARALLELFOR\n #else\n #pragma omp for reduction(+:norm)\n\n for (j=RADIUS; j #pragma omp parallel for reduction(+:norm), private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/Cxx11/sgemm-cblas.cc", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) num_threads(nt)", "context_chars": 100, "text": "\n const int n = order;\n const float alpha = 1.0;\n const float beta = 1.0;\n\n#ifdef _OPENMP\nfor (int b=0; b #pragma omp parallel for schedule(dynamic) num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/Cxx11/dgemm-cblas.cc", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) num_threads(nt)", "context_chars": 100, "text": " const int n = order;\n const double alpha = 1.0;\n const double beta = 1.0;\n\n#ifdef _OPENMP\nfor (int b=0; b #pragma omp parallel for schedule(dynamic) num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/Cxx11/dgemm-mpi-cblas.cc", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) num_threads(nt)", "context_chars": 100, "text": " const int n = order;\n const double alpha = 1.0;\n const double beta = 1.0;\n\n#ifdef _OPENMP\nfor (int b=0; b #pragma omp parallel for schedule(dynamic) num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i,it,jt) collapse(2)", "context_chars": 100, "text": " */\n istart = 0; \n\n if (tiling) {\n#if COLLAPSE\n #else\n #pragma omp parallel for private (i,it,jt)\n\n for (j=0; j #pragma omp parallel for private (i,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i,it,jt)", "context_chars": 100, "text": "\n\n if (tiling) {\n#if COLLAPSE\n #pragma omp parallel for private (i,it,jt) collapse(2)\n#else\n for (j=0; j #pragma omp parallel for private (i,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "jt) = (double) 
(order*(jt+colstart) + it);\n B(it,jt) = 0.0;\n }\n }\n else {\n for (j=0;j #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " */\n istart = colstart; \n if (!tiling) {\n for (i=0; i #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt) collapse(2)", "context_chars": 100, "text": " j++) {\n B(j,i) += A(i,j);\n A(i,j) += 1.0;\n\t}\n }\n else {\n#if COLLAPSE\n #else\n #pragma omp parallel for private (j,it,jt)\n\n for (i=0; i #pragma omp parallel for private (j,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt)", "context_chars": 100, "text": " }\n else {\n#if COLLAPSE\n #pragma omp parallel for private (j,it,jt) collapse(2)\n#else\n for (i=0; i #pragma omp parallel for private (j,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "COMM_WORLD, &recv_req); \n#endif\n\n istart = send_to*Block_order; \n if (!tiling) {\n for (i=0; i #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt) collapse(2)", "context_chars": 100, "text": " Work_out(j,i) = A(i,j);\n A(i,j) += 1.0;\n\t }\n }\n else {\n#if COLLAPSE\n #else\n #pragma omp parallel for private (j,it,jt)\n\n for (i=0; i #pragma omp parallel for private (j,it,jt) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (j,it,jt)", "context_chars": 100, "text": " else {\n#if COLLAPSE\n #pragma omp parallel for private (j,it,jt) collapse(2)\n#else\n for (i=0; i #pragma omp parallel for private (j,it,jt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "cv_from*Block_order;\n /* scatter received block to transposed matrix; no need to tile */\n for (j=0; j #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Transpose/transpose.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "bserr = 0.0;\n istart = 0;\n double addit = ((double)(iterations+1) * (double) (iterations))/2.0;\n for (j=0;j #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Nstream/nstream.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "fset = %ld\\n\", offset);\n printf(\"Number of iterations = %d\\n\", iterations);\n }\n\n for (j=0; j #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Nstream/nstream.c", 
"omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "if (iter == 1) { \n MPI_Barrier(MPI_COMM_WORLD);\n local_nstream_time = wtime();\n }\n\n for (j=0; j #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Nstream/nstream.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(+:asum)", "context_chars": 100, "text": "0; iter<=iterations; iter++) aj += bj+scalar*cj;\n \n aj = aj * (double) (length);\n \n asum = 0.0;\n for (j=0; j epsilon) {\n printf (\"Failed Validation on output array\\n\");\n#if !VERBOSE\n printf (\" Expected checksum: %f \\n\",aj);\n printf (\" Observed checksum: %f \\n\",asum);\n\n return (0);\n } #pragma omp parallel for simd reduction(+:asum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ") (n-2*RADIUS);\n /* intialize the input and output arrays */\n for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) {\n IN(i,j) = COEFX*i+COEFY*j;\n OUT(i,j) = (DTYPE)0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i, j, ii, jj)", "context_chars": 100, "text": "+) {\n IN(i,j) = left_buf_in[kk++];\n }\n }\n\n /* Apply the stencil operator */\n for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n #if LOOPGEN\n #include \"loop_body_star.incl\"\n #else\n for (jj=-RADIUS; jj<=RADIUS; jj++) OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj);\n for (ii=-RADIUS; ii<0; ii++) OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n for (ii=1; ii<=RADIUS; ii++) OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n \n }\n } #pragma omp parallel for private (i, j, ii, jj)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "i=1; ii<=RADIUS; ii++) OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);\n #endif\n }\n }\n\n /* add constant to solution to force refresh of neighbor data, if any */\n for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) IN(i,j)+= 1.0;\n\n }\n\n local_stencil_time = wtime() - local_stencil_time;\n MPI_Reduce(&local_stencil_time, &stencil_time, 1, MPI_DOUBLE, MPI_MAX, root,\n MPI_COMM_WORLD);\n\n /* compute L1 norm in parallel */\n local_norm = (DTYPE) 0.0;\n#pragma omp parallel for reduction(+:local_norm) private (i)\n for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n local_norm += (DTYPE)ABS(OUT(i,j));\n }\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ParRes/Kernels/MPIOPENMP/Stencil/stencil.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:local_norm) private (i)", "context_chars": 100, "text": "e L1 norm in parallel */\n local_norm = (DTYPE) 0.0;\nfor (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {\n for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {\n local_norm += (DTYPE)ABS(OUT(i,j));\n }\n } #pragma omp parallel for reduction(+:local_norm) private (i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/Bisection.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n\t//omp_set_num_threads(nthreads);\n\t//omp_set_dynamic(0);\n\t//int threads = omp_get_max_threads();\n\tfor (int i = 0; i < nthreads; i++) {\n\t\t//std::cout << \"threads\" << omp_get_num_threads() << std::endl;\n\t\tint f = i * (n / nthreads);\n\t\tint l = (i + 1)*(n / nthreads) - 1;\n\t\tcompute_group_bisect(diagonal, offdiagonal, left_boundary, right_boundary, f, l, n, relative_tolerance, eigenvalues);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiAsync.cpp", "omp_pragma_line": "#pragma omp parallel for shared(distr_status, top, bot, S, U, processPointer) private(row,col) ", "context_chars": 100, "text": "j = 0; j < n - 1; j++)\n\t\t{\n\t\t\tdistr_status.clear();\n\t\t\ttoProcess.clear();\n\t\t\tprocessPointer = 0;\n\t\t\t//#pragma omp parallel for shared(top,bot) private(row,col)\n\t\t\tfor (int i = 0; i < S.size1() / 2; i++)\n\t\t\t{\n\t\t\t\t//cout << \"pair\" << endl << top << endl << bot << endl;\n\t\t\t\tstd::string num_thread_str = \"[\" + std::to_string(omp_get_thread_num()) + \"]\";\n\t\t\t\trow = std::max(top(i), bot(i)) - 1;\n\t\t\t\tcol = std::min(top(i), bot(i)) - 1;\n\t\t\t\tif (checkConjunction(distr_status, row) && checkConjunction(distr_status, col)) {\n\t\t\t\t\t//cout << \"d_status\" << num_thread_str << distr_status << endl;\n\t\t\t\t\tsetConjunction(distr_status, row);\n\t\t\t\t\tsetConjunction(distr_status, col);\n\t\t\t\t\trotateRowCol(S, U, col, row);\n\t\t\t\t\t//cout << num_thread_str << \"operated:\" << col << \" -col-row- \" << row << endl;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\ttoProcess(processPointer, 1) = row;\n\t\t\t\t\ttoProcess(processPointer, 0) = col;\n\t\t\t\t\t#pragma omp atomic\n\t\t\t\t\tprocessPointer++;\n\t\t\t\t\t//cout << num_thread_str << \"not operated:\" << col << \" -col-row- \" << row << endl;\n\t\t\t\t}\n\n\t\t\t} #pragma omp parallel for shared(distr_status, top, bot, S, U, processPointer) private(row,col) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiAsync.cpp", "omp_pragma_line": "#pragma omp parallel for shared(top,bot) private(row,col)", "context_chars": 100, "text": "lel for shared(distr_status, top, bot, S, U, processPointer) private(row,col) //num_threads(2)\n\t\t\t//for (int i = 0; i < S.size1() / 2; i++)\n\t\t\t{\n\t\t\t\t//cout << \"pair\" << endl << top << endl << bot << endl;\n\t\t\t\tstd::string num_thread_str = \"[\" + std::to_string(omp_get_thread_num()) + \"]\";\n\t\t\t\trow = std::max(top(i), bot(i)) - 1;\n\t\t\t\tcol = std::min(top(i), bot(i)) - 1;\n\t\t\t\tif (checkConjunction(distr_status, row) && checkConjunction(distr_status, col)) {\n\t\t\t\t\t//cout << \"d_status\" << num_thread_str << distr_status << endl;\n\t\t\t\t\tsetConjunction(distr_status, row);\n\t\t\t\t\tsetConjunction(distr_status, col);\n\t\t\t\t\trotateRowCol(S, U, col, row);\n\t\t\t\t\t//cout << num_thread_str << \"operated:\" << col << \" -col-row- \" << row << endl;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\ttoProcess(processPointer, 1) = row;\n\t\t\t\t\ttoProcess(processPointer, 0) = col;\n\t\t\t\t\t#pragma omp atomic\n\t\t\t\t\tprocessPointer++;\n\t\t\t\t\t//cout << num_thread_str << \"not operated:\" << 
col << \" -col-row- \" << row << endl;\n\t\t\t\t}\n\n\t\t\t} #pragma omp parallel for shared(top,bot) private(row,col)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/parallel_jacobi.h", "omp_pragma_line": "#pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd)", "context_chars": 100, "text": "\t\t\n\t\t//root.start();\n\t\twhile (not_converged) {\n\n\t\t\tfor (int set = 0; setfor (int k = 0; k #pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/parallel_jacobi.h", "omp_pragma_line": "#pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd) ", "context_chars": 100, "text": "schur_new(mat, p, q, co[k], si[k]);\n\t\t\t\t\tpremultiply(mat, p, q, co[k], si[k]);\n\t\t\t\t}\n#ifdef omptest\nfor (int k = 0; k #pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiIteration1.cpp", "omp_pragma_line": "#pragma omp parallel for shared(S) ", "context_chars": 100, "text": "ads();\n\tbool iterating = true;\n\n\twhile (iterating)\n\t{\n\t\tdebugMatrixForm(S);\n\t\tssteqr_debug(S);\n\n\t\t//for (int pp = 0; pp < n - 2; pp += 2) {\n\t\t\tint p = pp;\n\t\t\tint q = p + 1;\n\t\t\t////printf(\"In thread %d p = %d q = %d\\n\", omp_get_thread_num(), p, q);\n\t\t\t//std::cout << \"S(p, q)\" << S(p, q) << \" | \" << \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\t//debugMatrixForm(S);\n\t\t\t//std::cout << p <<\" p|q \"<< q << std::endl;\n\n\t\t\trotateJacobi2(S, q, p);\n\t\t\tdebugMatrixForm(S);\n\t\t\tssteqr_debug(S);\n\n\t\t\t//std::cout << \"S(p, q)\" << S(p, q) << \" | \"<< \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\t//debugMatrixForm(S);\n\t\t\t//std::cout << p << \" p|q \" << q << std::endl;\n\t\t\t//std::cout << \"====\" << std::endl;\n\t\t} #pragma omp parallel for shared(S) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiIteration1.cpp", "omp_pragma_line": "#pragma omp parallel for shared(S)", "context_chars": 100, "text": ";\n\t\t\t//std::cout << p << \" p|q \" << q << std::endl;\n\t\t\t//std::cout << \"====\" << std::endl;\n\t\t}\n\n\t\t//for (int pp = 1; pp < n - 1; pp += 2) {\n\t\t\tint p = pp;\n\t\t\tint q = p + 1;\n\t\t\t/*std::cout << \"S(p, q)\" << S(p, q) << \" | \" << \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\tdebugMatrixForm(S);\n\t\t\tstd::cout << p <<\" p|q \"<< q << std::endl;*/\n\t\t\t\n\t\t\t\n\t\t\trotateJacobi2(S, q, p);\n\n\t\t/*\tstd::cout << \"S(p, q)\" << S(p, q) << \" | \"<< \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\tdebugMatrixForm(S);\n\t\t\tstd::cout << p << \" p|q \" << q << std::endl;\n\t\t\tstd::cout << \"====\" << std::endl;*/\n\t\t} #pragma omp parallel for shared(S)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiEigenCode/parallel_jacobi.h", "omp_pragma_line": "#pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd)", "context_chars": 100, "text": "\t\t\n\t\t//root.start();\n\t\twhile (not_converged) {\n\n\t\t\tfor (int set = 0; setfor (int k = 0; k #pragma omp parallel for default(none) shared(mat, n, si, co, pe, 
isodd)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiEigenCode/parallel_jacobi.h", "omp_pragma_line": "#pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd) ", "context_chars": 100, "text": "schur_new(mat, p, q, co[k], si[k]);\n\t\t\t\t\tpremultiply(mat, p, q, co[k], si[k]);\n\t\t\t\t}\n#ifdef omptest\nfor (int k = 0; k #pragma omp parallel for default(none) shared(mat, n, si, co, pe, isodd) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiEigenCode/JacobiIteration1.cpp", "omp_pragma_line": "#pragma omp parallel for shared(S) ", "context_chars": 100, "text": "ads();\n\tbool iterating = true;\n\n\twhile (iterating)\n\t{\n\t\tdebugMatrixForm(S);\n\t\tssteqr_debug(S);\n\n\t\t//for (int pp = 0; pp < n - 2; pp += 2) {\n\t\t\tint p = pp;\n\t\t\tint q = p + 1;\n\t\t\t////printf(\"In thread %d p = %d q = %d\\n\", omp_get_thread_num(), p, q);\n\t\t\t//std::cout << \"S(p, q)\" << S(p, q) << \" | \" << \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\t//debugMatrixForm(S);\n\t\t\t//std::cout << p <<\" p|q \"<< q << std::endl;\n\n\t\t\trotateJacobi2(S, q, p);\n\t\t\tdebugMatrixForm(S);\n\t\t\tssteqr_debug(S);\n\n\t\t\t//std::cout << \"S(p, q)\" << S(p, q) << \" | \"<< \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\t//debugMatrixForm(S);\n\t\t\t//std::cout << p << \" p|q \" << q << std::endl;\n\t\t\t//std::cout << \"====\" << std::endl;\n\t\t} #pragma omp parallel for shared(S) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NikiforovAll/JacobiEigenvalueAlgorithm/JacobiEigenvalueAlgorithm/JacobiEigenCode/JacobiIteration1.cpp", "omp_pragma_line": "#pragma omp parallel for shared(S)", "context_chars": 100, "text": ";\n\t\t\t//std::cout << p << \" p|q \" << q << std::endl;\n\t\t\t//std::cout << \"====\" << std::endl;\n\t\t}\n\n\t\t//for (int pp = 1; pp < n - 1; pp += 2) {\n\t\t\tint p = pp;\n\t\t\tint q = p + 1;\n\t\t\t/*std::cout << \"S(p, q)\" << S(p, q) << \" | \" << \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\tdebugMatrixForm(S);\n\t\t\tstd::cout << p <<\" p|q \"<< q << std::endl;*/\n\t\t\t\n\t\t\t\n\t\t\trotateJacobi2(S, q, p);\n\n\t\t/*\tstd::cout << \"S(p, q)\" << S(p, q) << \" | \"<< \"S(q, p)\" << S(q, p) << std::endl;\n\t\t\tdebugMatrixForm(S);\n\t\t\tstd::cout << p << \" p|q \" << q << std::endl;\n\t\t\tstd::cout << \"====\" << std::endl;*/\n\t\t} #pragma omp parallel for shared(S)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rshrc/parallel-processing/openmp/parallel_loops.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " main(int argc, char *argv[])\n{\n int arr[50000], count0 = 0, count1 = 0, count2 = 0, count3 = 0;\nfor (int i = 0; i < 50000; i++)\n {\n int thread_being_used = omp_get_thread_num();\n arr[i] = 5 * i;\n printf(\"Using Thread : %d\\n\", thread_being_used);\n if (thread_being_used == 0)\n count0++;\n else if (thread_being_used == 1)\n count1++;\n else if (thread_being_used == 2)\n count2++;\n else\n count3++;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AyushBhandariNITK/Parallel_Graph_Coloring_Using_Openmp/coloring.cpp", "omp_pragma_line": "#pragma omp parallel for shared(allIsColored) reduction(+:num_colored)", "context_chars": 100, "text": "nd;\n}\n\nbool GraphColoring::allColored() const {\n bool allIsColored = true;\n uint 
num_colored = 0;\nfor (int i = 0; i < num_vertices; i++) {\n if (colors[i] == UNKNOWN) {\n allIsColored = false;\n }\n num_colored++;\n } #pragma omp parallel for shared(allIsColored) reduction(+:num_colored)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AyushBhandariNITK/Parallel_Graph_Coloring_Using_Openmp/coloring.cpp", "omp_pragma_line": "#pragma omp parallel for reduction(+:false_colored)", "context_chars": 100, "text": "red++;\n }\n return allIsColored;\n}\n\nbool GraphColoring::accuracy() const {\n\tint false_colored = 0;\nfor (int v_i = 0; v_i < num_vertices; v_i++) {\n\t\tfor (int neighbor_ind = row_ptr[v_i]; neighbor_ind < row_ptr[v_i + 1]; neighbor_ind++) {\n\t\t const int neighbor = col_ind[neighbor_ind];\n\t\t\tif (colors[neighbor] == colors[v_i]) {\n\t\t\t\tfalse_colored++;\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for reduction(+:false_colored)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AyushBhandariNITK/Parallel_Graph_Coloring_Using_Openmp/coloring.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "s\n// ========================================================\n\nvoid GraphColoring::assignColors() {\nfor (uint vertex_ind = 0; vertex_ind < unvisited_vertices_tail; vertex_ind++) {\n const uint vertex = unvisited_vertices[vertex_ind];\n colors[vertex] = getNextColor(vertex);\t\t\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AyushBhandariNITK/Parallel_Graph_Coloring_Using_Openmp/coloring.cpp", "omp_pragma_line": "#pragma omp parallel for schedule(guided)", "context_chars": 100, "text": "ts() {\n\t// Traverse edges of the graph, for edges (u, v) where C[u] = C[v] reset color of min(u, v)\nfor (unsigned int vertex_ind = 0; vertex_ind < unvisited_vertices_tail; vertex_ind++) {\n const uint vertex = unvisited_vertices[vertex_ind];\n\t\tfor (unsigned int neighbor_ind = row_ptr[vertex]; neighbor_ind < row_ptr[vertex + 1]; neighbor_ind++) {\n\t\t const uint neighbor_label = col_ind[neighbor_ind];\n\t\t if (colors[neighbor_label] == colors[vertex]) {\n\t\t const uint minLabel = std::min(neighbor_label, vertex);\n\t\t colors[minLabel] = UNKNOWN;\t\t \n\t\t }\n\t\t}\n } #pragma omp parallel for schedule(guided)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/AyushBhandariNITK/Parallel_Graph_Coloring_Using_Openmp/coloring.cpp", "omp_pragma_line": "#pragma omp parallel for collapse(1)", "context_chars": 100, "text": "e color of the one having the color on the other\n\t// version 0.01 - linear search for such vertices\nfor (unsigned int vertex_ind = 0; vertex_ind < unvisited_vertices_tail; vertex_ind++) {\n const uint vertex = unvisited_vertices[vertex_ind];\n const uint vertex_color = colors[vertex];\n for (uint neighbor_ind = row_ptr[vertex]; neighbor_ind < row_ptr[vertex + 1]; neighbor_ind++) {\n const uint neighbor_label = col_ind[neighbor_ind];\n if (colors[neighbor_label] != UNKNOWN) {\n\tforbiddens[vertex].insert(colors[neighbor_label]);\n }\t\t \n }\n } #pragma omp parallel for collapse(1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/mcrat.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) private(angle_count)", "context_chars": 100, "text": "=(int) (theta_jmin); angle_count< (int) (theta_jmax+1) ;angle_count=angle_count+delta_theta )\n\n for (angle_count=0; angle_count< num_angles ;angle_count++ )\n {\n \n 
printf(\"%d\\t%lf\\n\", omp_get_thread_num(), delta_theta );\n double inj_radius;\n int frm2, frm0;\n char mc_filename[200]=\"\";\n char mc_filename_2[200]=\"\";\n char mc_operation[200]=\"\";\n char mc_dir[200]=\"\" ;\n int file_count = 0;\n DIR * dirp;\n struct dirent * entry;\n struct stat st = {0};\n double theta_jmin_thread=0, theta_jmax_thread=0;\n \n \n theta_jmin_thread= (*(thread_theta+angle_count))*(M_PI/180);//(*(thread_theta+omp_get_thread_num() ));\n theta_jmax_thread= ((*(thread_theta+angle_count))+delta_theta)*(M_PI/180);//(*(thread_theta+omp_get_thread_num()+1 ));\n printf(\"Thread %d: %0.1lf, %0.1lf \\n %d %d\\n\", omp_get_thread_num(), theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm2_small, frm2_large );\n \n snprintf(mc_dir,sizeof(flash_prefix),\"%s%s%0.1lf-%0.1lf/\",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this\n \n printf(\">> Thread %d in MCRaT: I am working on path: %s \\n\",omp_get_thread_num(), mc_dir );\n \n if ((theta_jmin_thread >= 0) && (theta_jmax_thread <= (2*M_PI/180) )) //if within small angle (0-2 degrees) use _small inj_radius and frm2\n {\n inj_radius=inj_radius_small;\n frm2=frm2_small;\n frm0=frm0_small;\n }\n else\n {\n inj_radius=inj_radius_large;\n frm2=frm2_large;\n frm0=frm0_large;\n }\n printf(\"Thread %d: %0.1lf, %0.1lf \\n %d %e %d\\n\", omp_get_thread_num(), theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm2, inj_radius, frm0 );\n\n //want to also have another set of threads that each has differing ranges of frame injections therefore do nested parallelism here so each thread can read its own checkpoint file\n //#pragma omp parallel num_threads(2) firstprivate(restrt)\n {\n char flash_file[200]=\"\";\n char log_file[200]=\"\";\n FILE *fPtr=NULL; //pointer to log file for each thread\n double *xPtr=NULL, *yPtr=NULL, *rPtr=NULL, *thetaPtr=NULL, *velxPtr=NULL, *velyPtr=NULL, *densPtr=NULL, *presPtr=NULL, *gammaPtr=NULL, *dens_labPtr=NULL;\n double *phiPtr=NULL, *velzPtr=NULL, *zPtr=NULL;\n double *szxPtr=NULL,*szyPtr=NULL, *tempPtr=NULL; //pointers to hold data from FLASH files\n int num_ph=0, array_num=0, ph_scatt_index=0, max_scatt=0, min_scatt=0,i=0; //number of photons produced in injection algorithm, number of array elleemnts from reading FLASH file, index of photon whch does scattering, generic counter\n double dt_max=0, thescatt=0, accum_time=0; \n double gamma_infinity=0, time_now=0, time_step=0, avg_scatt=0; //gamma_infinity not used?\n double ph_dens_labPtr=0, ph_vxPtr=0, ph_vyPtr=0, ph_tempPtr=0, ph_vzPtr=0;// *ph_cosanglePtr=NULL ;\n double min_r=0, max_r=0;\n int frame=0, scatt_frame=0, frame_scatt_cnt=0, scatt_framestart=0, framestart=0;\n struct photon *phPtr=NULL; //pointer to array of photons \n \n \n //if (omp_get_thread_num()==0)\n //{\n //printf( \"A:%d Im Thread: %d with ancestor %d working in %s\\n\", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), mc_dir);\n //}\n \n if (restrt=='c')\n {\n printf(\">> mc.py: Reading checkpoint\\n\");\n //#pragma omp critical\n {\n readCheckpoint(mc_dir, &phPtr, frm0, &framestart, &scatt_framestart, &num_ph, &restrt, &time_now);\n \n /*\n for (i=0;ip0, (phPtr+i)->p1, (phPtr+i)->p2, (phPtr+i)->p3, (phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2, (phPtr+i)->num_scatt );\n }\n */\n if (restrt=='c')\n {\n printf(\">> Thread %d with ancestor %d: Starting from photons injected at frame: %d out of %d\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),framestart, frm2);\n printf(\">> 
Thread %d with ancestor %d: Continuing scattering %d photons from frame: %d\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),num_ph, scatt_framestart);\n printf(\">> Thread %d with ancestor %d: The time now is: %e\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),time_now);\n }\n else\n {\n printf(\">> Thread %d with ancestor %d: Continuing simulation by injecting photons at frame: %d out of %d\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),framestart, frm2); //starting with new photon injection is same as restarting sim\n }\n }\n }\n else if (stat(mc_dir, &st) == -1)\n {\n mkdir(mc_dir, 0777); //make the directory with full permissions\n \n framestart=frm0; //if restarting then start from parameters given in mc.par file\n scatt_framestart=frm0;\n }\n else \n {\n //remove everything from MC directory to ensure no corruption of data if theres other files there besides the mc.par file\n //for a checkpoint implementation, need to find the latest checkpoint file and read it and not delete the files\n #pragma omp critical\n {\n printf(\">> Thread %d with ancestor %d: Cleaning directory \\n\",omp_get_thread_num(), omp_get_ancestor_thread_num(1));\n dirp = opendir(mc_dir);\n while ((entry = readdir(dirp)) != NULL) \n {\n if (entry->d_type == DT_REG) { /* If the entry is a regular file */\n file_count++; //count how many files are in dorectory\n }\n }\n printf(\"File count %d\\n\", file_count);\n \n if (file_count>0)\n {\n for (i=0;i<=last_frm;i++)\n {\n \n snprintf(mc_filename,sizeof(mc_filename),\"%s%s%d%s\", mc_dir,\"mcdata_\",i,\"_P0.dat\");\n if(( access( mc_filename, F_OK ) != -1 ) )\n {\n snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s%d%s\",\"exec rm \", mc_dir,\"mcdata_\",i,\"_*.dat\"); //prepares string to remove *.dat in mc_dir\n printf(\"%s\\n\",mc_operation);\n system(mc_operation);\n \n //snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s%d%s\",\"exec rm \", mc_dir,\"mcdata_\",i,\"_*\");\n //system(mc_operation);\n }\n }\n //snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s\",\"exec rm \", mc_dir,\"mcdata_PW_*.dat\"); //prepares string to remove *.dat in mc_dir\n //system(mc_operation);\n \n snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s\",\"exec rm \", mc_dir,\"mcdata_PW.dat\"); //prepares string to remove *.dat in mc_dir\n system(mc_operation);\n \n //snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s\",\"exec rm \", mc_dir,\"mc_chkpt_*.dat\"); //prepares string to remove *.dat in mc_dir\n //system(mc_operation);\n \n snprintf(mc_operation,sizeof(flash_prefix),\"%s%s%s\",\"exec rm \", mc_dir,\"mc_output_*.log\"); //prepares string to remove *.log in mc_dir\n system(mc_operation);\n \n }\n }\n framestart=frm0; //if restarting then start from parameters given in mc.par file\n scatt_framestart=frm0;\n \n }\n \n dt_max=1.0/fps;\n //#pragma omp barrier\n \n snprintf(log_file,sizeof(log_file),\"%s%s\",mc_dir,\"mc_output.log\" );\n printf(\"%s\\n\",log_file);\n fPtr=fopen(log_file, \"w\");\n \n fprintf(fPtr, \"%d Im Thread: %d with ancestor %d Starting on Frame: %d scatt_framestart: %d\\n\", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), framestart, scatt_framestart);\n fflush(fPtr);\n //fclose(fPtr);\n //#pragma omp barrier\n //exit(0);\n //loop over frames \n //for a checkpoint implementation, start from the last saved \"frame\" value and go to the saved \"frm2\" value\n \n //#pragma omp for \n for (frame=framestart;frame<=frm2;frame++)\n {\n if (restrt=='r')\n {\n time_now=frame/fps; //for a checkpoint 
implmentation, load the saved \"time_now\" value when reading the ckeckpoint file otherwise calculate it normally\n }\n \n //printf(\">> mc.py: Working on Frame %d\\n\", frame);\n fprintf(fPtr,\"%d Im Thread: %d with ancestor %d Working on Frame: %d\\n\", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), frame);\n fflush(fPtr);\n \n if (restrt=='r')\n {\n \n //exit(0);\n //read in FLASH file\n //for a checkpoint implmentation, dont need to read the file yet\n if (dim_switch==0)\n {\n //put proper number at the end of the flash file \n modifyFlashName(flash_file, flash_prefix, frame, dim_switch);\n fprintf(fPtr,\">> Im Thread: %d with ancestor %d: Opening FLASH file %s\\n\",omp_get_thread_num(), omp_get_ancestor_thread_num(1), flash_file);\n fflush(fPtr);\n \n #pragma omp critical\n {\n readAndDecimate(flash_file, inj_radius, fps, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\\\n &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fPtr);\n }\n }\n else\n {\n read_hydro(FILEPATH, frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\\\n &thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fps, fPtr);\n }\n \n //check for run type\n if(strcmp(cyl, this_run)==0)\n {\n //printf(\"In cylindrical prep\\n\");\n cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);\n }\n else if (strcmp(sph, this_run)==0)\n {\n sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num );\n }\n \n //determine where to place photons and how many should go in a given place\n //for a checkpoint implmentation, dont need to inject photons, need to load photons' last saved data \n fprintf(fPtr,\">> Thread: %d with ancestor %d: Injecting photons\\n\",omp_get_thread_num(), omp_get_ancestor_thread_num(1));\n fflush(fPtr);\n \n if (dim_switch==0)\n {\n photonInjection(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, velxPtr, velyPtr,rng[omp_get_thread_num()] ); \n }\n else\n {\n photonInjection3D(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, zPtr, szxPtr, szyPtr,rPtr,thetaPtr, phiPtr, tempPtr, velxPtr, velyPtr, velzPtr, rng[omp_get_thread_num()] ); \n }\n \n //printf(\"%d\\n\",num_ph); //num_ph is one more photon than i actually have\n /*\n for (i=0;ir0, (phPtr+i)->r1, (phPtr+i)->r2 );\n */\n }\n \n //scatter photons all the way thoughout the jet\n //for a checkpoint implmentation, start from the last saved \"scatt_frame\" value eh start_frame=frame or start_frame=cont_frame\n if (restrt=='r')\n {\n scatt_framestart=frame; //have to make sure that once the inner loop is done and the outer loop is incrememnted by one the inner loop starts at that new value and not the one read by readCheckpoint()\n }\n \n for (scatt_frame=scatt_framestart;scatt_frame<=last_frm;scatt_frame++)\n {\n fprintf(fPtr,\">>\\n\");\n fprintf(fPtr,\">> Thread %d with ancestor %d : Working on photons injected at frame: %d out of %d\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),frame, frm2);\n fprintf(fPtr,\">> Thread %d with ancestor %d: %s - Working on frame %d\\n\",omp_get_thread_num(), omp_get_ancestor_thread_num(1), THISRUN, scatt_frame);\n 
fprintf(fPtr,\">> Thread %d with ancestor %d: Opening file...\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1));\n fflush(fPtr);\n \n \n \n \n if (dim_switch==0)\n {\n //put proper number at the end of the flash file\n modifyFlashName(flash_file, flash_prefix, scatt_frame, dim_switch);\n #pragma omp critical\n {\n phMinMax(phPtr, num_ph, &min_r, &max_r);\n readAndDecimate(flash_file, inj_radius, fps, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\\\n &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fPtr);\n }\n }\n else\n {\n phMinMax(phPtr, num_ph, &min_r, &max_r);\n \n read_hydro(FILEPATH, scatt_frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\\\n &thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fps, fPtr);\n }\n \n //check for run type\n if(strcmp(cyl, this_run)==0)\n {\n //printf(\"In cylindrical prep\\n\");\n cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);\n }\n else if (strcmp(sph, this_run)==0)\n {\n sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num );\n }\n //printf(\"The result of read and decimate are arrays with %d elements\\n\", array_num);\n \n fprintf(fPtr,\">> Thread %d with ancestor %d: propagating and scattering %d photons\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1),num_ph);\n fflush(fPtr);\n \n frame_scatt_cnt=0;\n while (time_now<((scatt_frame+1)/fps))\n {\n //if simulation time is less than the simulation time of the next frame, keep scattering in this frame\n //go through each photon and find blocks closest to each photon and properties of those blocks to calulate mean free path\n //and choose the photon with the smallest mfp and calculate the timestep\n \n ph_scatt_index=findNearestPropertiesAndMinMFP(phPtr, num_ph, array_num, &time_step, xPtr, yPtr, zPtr, velxPtr, velyPtr, velzPtr, dens_labPtr, tempPtr,\\\n &ph_dens_labPtr, &ph_vxPtr, &ph_vyPtr, &ph_vzPtr, &ph_tempPtr, rng[omp_get_thread_num()], dim_switch);\n \n //printf(\"In main: %e, %d, %e, %e\\n\", *(ph_num_scatt+ph_scatt_index), ph_scatt_index, time_step, time_now);\n printf(\"In main: %e, %d, %e, %e\\n\",((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);\n \n if (time_stepnum_scatt)+=1;\n frame_scatt_cnt+=1;\n time_now+=time_step;\n \n updatePhotonPosition(phPtr, num_ph, time_step);\n \n //scatter the photon\n //printf(\"Passed Parameters: %e, %e, %e\\n\", (ph_vxPtr), (ph_vyPtr), (ph_tempPtr));\n\n photonScatter( (phPtr+ph_scatt_index), (ph_vxPtr), (ph_vyPtr), ph_vzPtr, (ph_tempPtr), rng[omp_get_thread_num()] , dim_switch, fPtr);\n \n \n //if (frame_scatt_cnt%1000 == 0)\n {\n fprintf(fPtr,\"Scattering Number: %d\\n\", frame_scatt_cnt);\n fprintf(fPtr,\"The local temp is: %e\\n\", (ph_tempPtr));\n fprintf(fPtr,\"Average photon energy is: %e\\n\", averagePhotonEnergy(phPtr, num_ph)); //write function to average over the photons p0 and then do (*3e10/1.6e-9)\n fflush(fPtr);\n }\n \n }\n else\n {\n time_now+=dt_max;\n \n //for each photon update its position based on its momentum\n \n updatePhotonPosition(phPtr, num_ph, dt_max);\n }\n \n //printf(\"In main 2: %e, %d, %e, %e\\n\", ((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);\n\n }\n \n //get scattering statistics\n phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt);\n \n fprintf(fPtr,\"The number of scatterings in 
this frame is: %d\\n\", frame_scatt_cnt);\n fprintf(fPtr,\"The last time step was: %lf.\\nThe time now is: %lf\\n\", time_step,time_now);\n fprintf(fPtr,\"The maximum number of scatterings for a photon is: %d\\nThe minimum number of scattering for a photon is: %d\\n\", max_scatt, min_scatt);\n fprintf(fPtr,\"The average number of scatterings thus far is: %lf\\n\", avg_scatt);\n fflush(fPtr);\n \n printPhotons(phPtr, num_ph, scatt_frame , frame, mc_dir);\n exit(0);\n //for a checkpoint implmentation,save the checkpoint file here after every 5 frames or something\n //save the photons data, the scattering number data, the scatt_frame value, and the frame value\n //WHAT IF THE PROGRAM STOPS AFTER THE LAST SCATT_FRAME, DURING THE FIRST SCATT_FRAME OF NEW FRAME VARIABLE - save restrt variable as 'r'\n fprintf(fPtr, \">> Thread %d with ancestor %d: Making checkpoint file\\n\", omp_get_thread_num(), omp_get_ancestor_thread_num(1));\n fflush(fPtr);\n saveCheckpoint(mc_dir, frame, scatt_frame, num_ph, time_now, phPtr, last_frm);\n \n free(xPtr);free(yPtr);free(szxPtr);free(szyPtr);free(rPtr);free(thetaPtr);free(velxPtr);free(velyPtr);free(densPtr);free(presPtr);\n free(gammaPtr);free(dens_labPtr);free(tempPtr);\n xPtr=NULL; yPtr=NULL; rPtr=NULL;thetaPtr=NULL;velxPtr=NULL;velyPtr=NULL;densPtr=NULL;presPtr=NULL;gammaPtr=NULL;dens_labPtr=NULL;\n szxPtr=NULL; szyPtr=NULL; tempPtr=NULL;\n }\n restrt='r';//set this to make sure that the next iteration of propogating photons doesnt use the values from the last reading of the checkpoint file\n free(phPtr); \n phPtr=NULL;\n \n } \n \n \n }//end omp parallel inner section\n \n //merge files from each worker thread within a directory\n //dirFileMerge(mc_dir, frm0, last_frm);\n \n } #pragma omp parallel for num_threads(num_thread) private(angle_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": " rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); \n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( ph_x, ph_y, ph_z, ph_phi, dist_min, dist, j, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp ) ", "context_chars": 100, "text": "mes the min (max) photon radius\n //or just parallelize this part here\n \n min_mfp=1e12;\n for (i=0;ir0), ((ph+i)->r1));\n if (dim_switch_3d==0)\n {\n ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate\n ph_y=((ph+i)->r2);\n }\n else\n {\n ph_x=((ph+i)->r0);\n ph_y=((ph+i)->r1);\n ph_z=((ph+i)->r2);\n }\n //printf(\"ph_x:%e, ph_y:%e\\n\", ph_x, ph_y);\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n \n dist_min=1e12;//set dist to impossible value to make sure at least first distance calulated is saved \n block_dist=3e9;\n while (dist_min==1e12) //if this is true, then the algorithm hasnt found blocks within the acceptable range given by block_dist\n {\n \n for(j=0;jp1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the 
angle between the photon and the fluid velocities via a dot product\n (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n if (dim_switch_3d==0)\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);\n }\n else\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5);\n }\n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n //while ((rnd_tracker<=0) || (rnd_tracker>=1))\n //{\n rnd_tracker=gsl_rng_uniform_pos(rng[omp_get_thread_num()]);\n //rnd_tracker=gsl_rng_uniform_pos(rand);\n //printf(\"Rnd_tracker: %e Thread number %d \\n\",rnd_tracker, omp_get_thread_num() );\n //}\n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOMP_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths \n //if (mfp<0)\n //{\n //printf(\"\\nThread: %d Photon: %d mfp: %e cos_angle: %e beta: %e dens_lab: %e rnd_tracker: %e\\n\\n\",omp_get_thread_num(), i, mfp, n_cosangle , beta,n_dens_lab_tmp, rnd_tracker );\n //}\n \n #pragma omp critical \n if ( mfp #pragma omp parallel for num_threads(num_thread) firstprivate( ph_x, ph_y, ph_z, ph_phi, dist_min, dist, j, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/PARALLELIZE/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": ")); //minus 1 because master thread already has rand initalized\n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/PARALLELIZE/mclib.c", "omp_pragma_line": "#pragma omp parallel for firstprivate( ph_x, ph_y, ph_phi, dist_min, dist, j, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )", "context_chars": 100, "text": "n (max) photon radius\n //or just parallelize this part here\n \n \n \n min_mfp=1e12;\n for (i=0;ir0), ((ph+i)->r1));\n ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate\n ph_y=((ph+i)->r2);\n //printf(\"ph_x:%e, ph_y:%e\\n\", ph_x, ph_y);\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n \n dist_min=1e12;//set dist to impossible value to make sure at least first distance calulated is saved\n for(j=0;jp1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product\n (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);\n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n //while ((rnd_tracker<=0) || (rnd_tracker>=1))\n //{\n 
rnd_tracker=gsl_rng_uniform_pos(rng[omp_get_thread_num()]);\n //printf(\"Rnd_tracker: %e Thread number %d \\n\",rnd_tracker, omp_get_thread_num() );\n //}\n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOMP_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths \n //if (mfp<0)\n //{\n //printf(\"\\nThread: %d Photon: %d mfp: %e cos_angle: %e beta: %e dens_lab: %e rnd_tracker: %e\\n\\n\",omp_get_thread_num(), i, mfp, n_cosangle , beta,n_dens_lab_tmp, rnd_tracker );\n //}\n \n #pragma omp critical \n if ( mfp #pragma omp parallel for firstprivate( ph_x, ph_y, ph_phi, dist_min, dist, j, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib_3d.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)", "context_chars": 100, "text": "eta_min=DBL_MAX;\n int i=0, num_thread=omp_get_num_threads();\n double ph_r=0, ph_theta=0;\n \nfor (i=0;iweight != 0)\n {\n ph_r=pow(pow( ((ph+i)->r0), 2.0) + pow(((ph+i)->r1),2.0 ) + pow(((ph+i)->r2) , 2.0),0.5);\n ph_theta=acos(((ph+i)->r2) /ph_r); //this is the photons theta psition in the FLASH grid, gives in radians\n if (ph_r > temp_r_max )\n {\n temp_r_max=ph_r;\n //fprintf(fPtr, \"The new max is: %e from photon %d with x: %e y: %e z: %e\\n\", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);\n }\n \n //if ((i==0) || (ph_rr0), (ph+i)->r1, (ph+i)->r2);\n }\n \n if (ph_theta > temp_theta_max )\n {\n temp_theta_max=ph_theta;\n //fprintf(fPtr, \"The new max is: %e from photon %d with x: %e y: %e z: %e\\n\", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);\n }\n \n //if ((i==0) || (ph_rr0), (ph+i)->r1, (ph+i)->r2);\n }\n }\n } #pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": " rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); \n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_r, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )", "context_chars": 100, "text": " parallelize this part here\n all_time_steps=malloc(num_ph*sizeof(double));\n min_mfp=1e12;\n for (i=0;ir0), ((ph+i)->r1));\n if (find_nearest_block_switch==0)\n {\n ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault\n }\n else\n {\n ph_block_index=0; //if starting a new frame set index=0 to avoid this issue\n }\n \n if 
(dim_switch_3d==0)\n {\n ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate\n ph_y=((ph+i)->r2);\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n ph_r=pow(ph_x*ph_x + ph_y*ph_y, 0.5);\n }\n else\n {\n ph_x=((ph+i)->r0);\n ph_y=((ph+i)->r1);\n ph_z=((ph+i)->r2);\n ph_r=pow(ph_x*ph_x + ph_y*ph_y+ph_z*ph_z, 0.5);\n }\n //if the location of the photon is less than the domain of the hydro simulation then do all of this, otherwise assing huge mfp value so no scattering occurs and the next frame is loaded\n if ((ph_ynearest_block_index=min_index; //save the index\n \n }\n\n //fprintf(fPtr,\"Outside\\n\");\n \n //save values\n (n_dens_lab_tmp)= (*(dens_lab+min_index));\n (n_vx_tmp)= (*(velx+min_index));\n (n_vy_tmp)= (*(vely+min_index));\n (n_temp_tmp)= (*(temp+min_index));\n if (dim_switch_3d==1)\n {\n (n_vz_tmp)= (*(velz+min_index));\n }\n \n if (dim_switch_3d==0)\n {\n fl_v_x=(*(velx+min_index))*cos(ph_phi);\n fl_v_y=(*(velx+min_index))*sin(ph_phi);\n fl_v_z=(*(vely+min_index));\n }\n else\n {\n fl_v_x=(*(velx+min_index));\n fl_v_y=(*(vely+min_index));\n fl_v_z=(*(velz+min_index));\n }\n \n fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5);\n ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product\n (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n if (dim_switch_3d==0)\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);\n }\n else\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5);\n }\n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n \n rnd_tracker=gsl_rng_uniform_pos(rng[omp_get_thread_num()]);\n //printf(\"Rnd_tracker: %e Thread number %d \\n\",rnd_tracker, omp_get_thread_num() );\n \n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOMP_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths \n }\n else\n {\n mfp=min_mfp;\n //printf(\"In ELSE\\n\");\n }\n \n *(all_time_steps+i)=mfp/C_LIGHT;\n } #pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_r, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": " rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); \n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, 
min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )", "context_chars": 100, "text": "mes the min (max) photon radius\n //or just parallelize this part here\n \n min_mfp=1e12;\n for (i=0;ir0), ((ph+i)->r1));\n if (find_nearest_block_switch==0)\n {\n ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault\n }\n else\n {\n ph_block_index=0; //if starting a new frame set index=0 to avoid this issue\n }\n \n if (dim_switch_3d==0)\n {\n ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate\n ph_y=((ph+i)->r2);\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n \n }\n else\n {\n ph_x=((ph+i)->r0);\n ph_y=((ph+i)->r1);\n ph_z=((ph+i)->r2);\n \n }\n //printf(\"ph_x:%e, ph_y:%e\\n\", ph_x, ph_y);\n \n is_in_block=checkInBlock(ph_block_index, ph_x, ph_y, ph_z, x, y, z, szx, szy, dim_switch_3d, riken_switch);\n \n if (find_nearest_block_switch==0 && is_in_block)\n {\n //keep the saved grid index\n min_index=ph_block_index;\n }\n else\n {\n //find the new index of the block closest to the photon\n //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z, dim_switch_3d); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh\n \n //find the new index of the block that the photon is actually in\n min_index=findContainingBlock(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, dim_switch_3d, riken_switch, fPtr);\n \n (ph+i)->nearest_block_index=min_index; //save the index\n \n }\n \n //look for the blocks surounding the block of interest and order them by the \n left_dist_min=1e15;//set dist to impossible value to make sure at least first distance calulated is saved\n right_dist_min=1e15;\n top_dist_min=1e15;\n bottom_dist_min=1e15;\n for (j=0;j(*(x+min_index)) && (dist < right_dist_min))\n {\n right_block_index=j;\n right_dist_min=dist;\n }\n \n if ((*(y+j))<(*(y+min_index)) && (dist < bottom_dist_min) )\n {\n bottom_block_index=j;\n bottom_dist_min=dist;\n }\n else if ((*(y+j))>(*(y+min_index)) && (dist < top_dist_min) )\n {\n top_block_index=j;\n top_dist_min=dist;\n }\n \n }\n all_adjacent_block_indexes[0]=left_block_index;\n all_adjacent_block_indexes[1]=right_block_index;\n all_adjacent_block_indexes[2]=bottom_block_index;\n all_adjacent_block_indexes[3]=top_block_index; \n \n //do a weighted average of the 4 nearest grids based on volume\n v=0;\n (n_dens_lab_tmp)=0;\n (n_vx_tmp)= 0;\n (n_vy_tmp)= 0;\n (n_temp_tmp)= 0;\n (n_vz_tmp)= 0;\n \n for (j=0;j<4;j++)\n {\n if (riken_switch==0)\n {\n //using FLASH\n dv=2.0*M_PI*(*(x+all_adjacent_block_indexes[j]))*pow(*(szx+all_adjacent_block_indexes[j]),2.0) ; \n }\n else\n {\n r=pow(pow((*(x+all_adjacent_block_indexes[j])),2.0)+pow((*(y+all_adjacent_block_indexes[j])),2.0), 0.5);\n theta=atan2((*(x+all_adjacent_block_indexes[j])), (*(y+all_adjacent_block_indexes[j])));\n dv=2.0*M_PI*pow(r,2)*sin(theta)*(*(szx+all_adjacent_block_indexes[j]))*(*(szy+all_adjacent_block_indexes[j])) ; \n }\n v+=dv;\n \n //save values\n (n_dens_lab_tmp)+= (*(dens_lab+all_adjacent_block_indexes[j]))*dv;\n (n_vx_tmp)+= (*(velx+all_adjacent_block_indexes[j]))*dv;\n (n_vy_tmp)+= (*(vely+all_adjacent_block_indexes[j]))*dv;\n (n_temp_tmp)+= (*(temp+all_adjacent_block_indexes[j]))*dv;\n if (dim_switch_3d==1)\n {\n (n_vz_tmp)+= (*(velz+all_adjacent_block_indexes[j]))*dv;\n }\n \n }\n \n\n 
//fprintf(fPtr,\"Outside\\n\");\n \n //save values\n (n_dens_lab_tmp)/= v;\n (n_vx_tmp)/= v;\n (n_vy_tmp)/= v;\n (n_temp_tmp)/= v;\n if (dim_switch_3d==1)\n {\n (n_vz_tmp)/= v;\n }\n \n if (dim_switch_3d==0)\n {\n fl_v_x=n_vx_tmp*cos(ph_phi);\n fl_v_y=n_vx_tmp*sin(ph_phi);\n fl_v_z=n_vy_tmp;\n }\n else\n {\n fl_v_x=n_vx_tmp;\n fl_v_y=n_vy_tmp;\n fl_v_z=n_vz_tmp;\n }\n \n fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5);\n ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product\n (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n if (dim_switch_3d==0)\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);\n }\n else\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5);\n }\n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n \n rnd_tracker=gsl_rng_uniform_pos(rng[omp_get_thread_num()]);\n \n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOMP_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths \n \n \n #pragma omp critical \n if ( mfp #pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0)", "context_chars": 100, "text": "thread=omp_get_num_threads();\n double old_position=0, new_position=0, divide_p0=0;\n \n \n for (i=0;ir0,2)+pow(ph->r1,2)+pow(ph->r2,2), 0.5 );\n \n divide_p0=1.0/((ph+i)->p0);\n \n ((ph+i)->r0)+=((ph+i)->p1)*divide_p0*C_LIGHT*t; //update x position\n \n ((ph+i)->r1)+=((ph+i)->p2)*divide_p0*C_LIGHT*t;//update y\n \n ((ph+i)->r2)+=((ph+i)->p3)*divide_p0*C_LIGHT*t;//update z\n \n new_position= pow( pow(ph->r0,2)+pow(ph->r1,2)+pow(ph->r2,2), 0.5 );\n \n if ((new_position-old_position)/t > C_LIGHT)\n {\n fprintf(fPtr, \"PHOTON NUMBER %d IS SUPERLUMINAL. 
ITS SPEED IS %e c.\\n\", i, ((new_position-old_position)/t)/C_LIGHT);\n }\n //printf(\"In update function: %e, %e, %e, %e, %e, %e, %e\\n\",((ph+i)->r0), ((ph+i)->r1), ((ph+i)->r2), t, ((ph+i)->p1)/((ph+i)->p0), ((ph+i)->p2)/((ph+i)->p0), ((ph+i)->p3)/((ph+i)->p0) ); \n } #pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/OLDER_MCRaT_VERSIONS/HYBRID_PARALLEL/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( filename_k, file_no_thread_num, cmd,mcdata_type,num_files, increment ) private(i,j,k)", "context_chars": 100, "text": "000]=\"\", cmd[2000]=\"\", mcdata_type[200]=\"\";\n \n //printf(\"Merging files in %s\\n\", dir); \n //// i < last frame because calculation before this function gives last_frame as the first frame of the next process set of frames to merge files for\n for (i=start_frame;i=3000))\n {\n increment=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1\n }\n \n for (j=0;j> \", file_no_thread_num);\n system(cmd);\n }\n \n //remove file\n snprintf(cmd, sizeof(cmd), \"%s%s\", \"rm \", filename_k);\n system(cmd);\n \n }\n \n \n }\n } #pragma omp parallel for num_threads(num_thread) firstprivate( filename_k, file_no_thread_num, cmd,mcdata_type,num_files, increment ) private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mc_cyclosynch.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) reduction(+:block_cnt)", "context_chars": 100, "text": "fprintf(fPtr, \"rmin %e rmax %e, theta min/max: %e %e\\n\", rmin, rmax, theta_min, theta_max);\n for(i=0;inum_elements;i++)\n {\n //look at all boxes in width delta r=c/fps and within angles we are interested in NEED TO IMPLEMENT\n #if DIMENSIONS == THREE\n //want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0\n hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]);\n hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]);\n #else\n hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0);\n hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0);\n \n\n if ((rmin <= r_grid_outercorner) && (r_grid_innercorner < rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner < theta_max))\n {\n block_cnt+=1;\n }\n \n } #pragma omp parallel for num_threads(num_thread) reduction(+:block_cnt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mc_cyclosynch.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) reduction(+:null_ph_count)", "context_chars": 100, "text": "IER INDEXES AND HOW MANY, dont subtract this from 
ph_tot @ the end, WILL NEED FOR PRINT PHOTONS\n for (i=0;i<*num_ph;i++)\n {\n if (((*ph_orig)[i].weight == 0)) //if photons are null COMPTONIZED_PHOTON photons and not absorbed UNABSORBED_CS_PHOTON photons\n {\n null_ph_count+=1;\n }\n } #pragma omp parallel for num_threads(num_thread) reduction(+:null_ph_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mc_cyclosynch.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate(b_field, el_dens, nu_c) reduction(+:abs_ph_count)", "context_chars": 100, "text": "h=0;//set thsi equal to 0, to recount in this function and get prepared for the next frame\n \n for (i=0;i<*num_ph;i++)\n {\n if (((*ph_orig)[i].weight != 0) && ((*ph_orig)[i].nearest_block_index != -1))\n {\n // if the photon isnt a null photon already, see if it should be absorbed\n \n el_dens= (hydro_data->dens)[(*ph_orig)[i].nearest_block_index]/M_P;//(*(dens+(*ph_orig)[i].nearest_block_index))/M_P;\n #if B_FIELD_CALC == TOTAL_E || B_FIELD_CALC == INTERNAL_E\n b_field=calcB(el_dens,(hydro_data->temp)[(*ph_orig)[i].nearest_block_index]);\n #else\n #if DIMENSIONS == TWO\n b_field=vectorMagnitude((hydro_data->B0)[(*ph_orig)[i].nearest_block_index], (hydro_data->B1)[(*ph_orig)[i].nearest_block_index], 0);\n #else\n b_field=vectorMagnitude((hydro_data->B0)[(*ph_orig)[i].nearest_block_index], (hydro_data->B1)[(*ph_orig)[i].nearest_block_index], (hydro_data->B2)[(*ph_orig)[i].nearest_block_index]);\n \n \n nu_c=calcCyclotronFreq(b_field);\n\n //nu_c=calcCyclotronFreq(calcB(el_dens, (hydro_data->temp)[(*ph_orig)[i].nearest_block_index])); old way //*(temp+(*ph_orig)[i].nearest_block_index)));\n //printf(\"photon %d has lab nu %e comv frequency %e and nu_c %e with FLASH grid number %d\\n\", i, (*ph_orig)[i].p0*C_LIGHT/PL_CONST, (*ph_orig)[i].comv_p0*C_LIGHT/PL_CONST, nu_c, (*ph_orig)[i].nearest_block_index);\n if (((*ph_orig)[i].comv_p0*C_LIGHT/PL_CONST <= nu_c) || ((*ph_orig)[i].type == CS_POOL_PHOTON))\n {\n //if the photon has a frequency less that nu_c, it should be absorbed and becomes a null photon\n //preset values for the the newly created spots to hold the emitted phtoons in;\n \n //if this is a synchrotron photons or photons that have been scattered that were once synch photons in this frame\n //fprintf(fPtr,\"photon %d being absorbed\\n\", i);\n if (((*ph_orig)[i].type != INJECTED_PHOTON) && ((*ph_orig)[i].type != UNABSORBED_CS_PHOTON) )\n {\n (*ph_orig)[i].weight=0;\n (*ph_orig)[i].nearest_block_index=-1;\n abs_ph_count++;\n \n if ((*ph_orig)[i].type == CS_POOL_PHOTON)\n {\n synch_ph_count++;\n }\n }\n else\n {\n //have an injected photon or UNABSORBED_CS_PHOTON (previous COMPTONIZED_PHOTON photon) that has a nu that can be absorbed\n abs_count+=(*ph_orig)[i].weight;\n (*ph_orig)[i].p0=-1; //set its energy negative so we know for later analysis that it can't be used and its been absorbed,\n (*ph_orig)[i].nearest_block_index=-1;\n //also set the weight equal to 0 since we no longer care about saving it\n (*ph_orig)[i].weight=0;\n abs_ph_count++;\n\n }\n }\n else\n {\n //if the phootn isnt going to be absorbed, see if its a COMPTONIZED_PHOTON photon thats survived and change it to an injected type\n \n //replace the potantial null photon with this photon's data\n (*ph_orig)[count].p0=(*ph_orig)[i].p0;\n (*ph_orig)[count].p1=(*ph_orig)[i].p1;\n (*ph_orig)[count].p2=(*ph_orig)[i].p2;\n (*ph_orig)[count].p3=(*ph_orig)[i].p3;\n (*ph_orig)[count].comv_p0=(*ph_orig)[i].comv_p0;\n 
(*ph_orig)[count].comv_p1=(*ph_orig)[i].comv_p1;\n (*ph_orig)[count].comv_p2=(*ph_orig)[i].comv_p2;\n (*ph_orig)[count].comv_p3=(*ph_orig)[i].comv_p3;\n (*ph_orig)[count].r0= (*ph_orig)[i].r0;\n (*ph_orig)[count].r1=(*ph_orig)[i].r1 ;\n (*ph_orig)[count].r2=(*ph_orig)[i].r2;\n (*ph_orig)[count].s0=(*ph_orig)[i].s0;\n (*ph_orig)[count].s1=(*ph_orig)[i].s1;\n (*ph_orig)[count].s2=(*ph_orig)[i].s2;\n (*ph_orig)[count].s3=(*ph_orig)[i].s3;\n (*ph_orig)[count].num_scatt=(*ph_orig)[i].num_scatt;\n (*ph_orig)[count].weight=(*ph_orig)[i].weight;\n (*ph_orig)[count].nearest_block_index=(*ph_orig)[i].nearest_block_index;\n (*ph_orig)[count].type=(*ph_orig)[i].type;\n \n //increment count\n count+=1;\n \n if (((*ph_orig)[i].type == COMPTONIZED_PHOTON) || ((*ph_orig)[i].type == UNABSORBED_CS_PHOTON) )\n {\n //if the photon is a COMPTONIZED_PHOTON phton (scattered synch photon from the current frame) or a UNABSORBED_CS_PHOTON photon (scattered synch photon) from an old frame\n //count how many of these there are\n *scatt_cyclosynch_num_ph+=1;\n }\n \n }\n }\n else\n {\n //see if the photon was a previous INJECTED_PHOTON photon absorbed that we still have to account for in the array\n if (((*ph_orig)[i].p0 < 0) )\n {\n //replace the potantial null photon with this photon's data\n (*ph_orig)[count].p0=(*ph_orig)[i].p0;\n (*ph_orig)[count].p1=(*ph_orig)[i].p1;\n (*ph_orig)[count].p2=(*ph_orig)[i].p2;\n (*ph_orig)[count].p3=(*ph_orig)[i].p3;\n (*ph_orig)[count].comv_p0=(*ph_orig)[i].comv_p0;\n (*ph_orig)[count].comv_p1=(*ph_orig)[i].comv_p1;\n (*ph_orig)[count].comv_p2=(*ph_orig)[i].comv_p2;\n (*ph_orig)[count].comv_p3=(*ph_orig)[i].comv_p3;\n (*ph_orig)[count].r0= (*ph_orig)[i].r0;\n (*ph_orig)[count].r1=(*ph_orig)[i].r1 ;\n (*ph_orig)[count].r2=(*ph_orig)[i].r2;\n (*ph_orig)[count].s0=(*ph_orig)[i].s0;\n (*ph_orig)[count].s1=(*ph_orig)[i].s1;\n (*ph_orig)[count].s2=(*ph_orig)[i].s2;\n (*ph_orig)[count].s3=(*ph_orig)[i].s3;\n (*ph_orig)[count].num_scatt=(*ph_orig)[i].num_scatt;\n (*ph_orig)[count].weight=(*ph_orig)[i].weight;\n (*ph_orig)[count].nearest_block_index=(*ph_orig)[i].nearest_block_index;\n (*ph_orig)[count].type=(*ph_orig)[i].type;\n \n //increment count\n count+=1;\n }\n }\n \n //fprintf(fPtr, \"photon %d has energy %e and weight %e with FLASH grid number %d\\n\", i, (*ph_orig)[i].p0*C_LIGHT/1.6e-9, (*ph_orig)[i].weight, (*ph_orig)[i].nearest_block_index);\n } #pragma omp parallel for num_threads(num_thread) firstprivate(b_field, el_dens, nu_c) reduction(+:abs_ph_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mcrat_io.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) reduction(+:weight_net_num_ph)", "context_chars": 100, "text": "BOVE\n count=0;//used to keep track of weight values since it may not be the same as num_ph\n //for (i=0;iweight != 0)\n {\n p0[count]= ((ph+i)->p0);\n p1[count]= ((ph+i)->p1);\n p2[count]= ((ph+i)->p2);\n p3[count]= ((ph+i)->p3);\n r0[count]= ((ph+i)->r0);\n r1[count]= ((ph+i)->r1);\n r2[count]= ((ph+i)->r2);\n #if COMV_SWITCH == ON\n {\n comv_p0[count]= ((ph+i)->comv_p0);\n comv_p1[count]= ((ph+i)->comv_p1);\n comv_p2[count]= ((ph+i)->comv_p2);\n comv_p3[count]= ((ph+i)->comv_p3);\n }\n \n #if STOKES_SWITCH == ON\n {\n s0[count]= ((ph+i)->s0);\n s1[count]= ((ph+i)->s1);\n s2[count]= ((ph+i)->s2);\n s3[count]= ((ph+i)->s3);\n }\n \n num_scatt[count]= ((ph+i)->num_scatt);\n weight[count]= ((ph+i)->weight);\n //fprintf(fPtr, \"%d %c %e %e %e %e %e %e %e %e\\n\", i, (ph+i)->type, 
(ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt, (ph+i)->weight, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->p0*C_LIGHT/1.6e-9);\n \n if ((frame==frame_last))\n {\n global_weight[count]=((ph+i)->weight);\n }\n \n *(ph_type+count)=(ph+i)->type;\n //printf(\"%d %c %e %e %e %e %e %e %e %e %c\\n\", i, (ph+i)->type, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt, (ph+i)->weight, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->p0*C_LIGHT/1.6e-9, *(ph_type+count));\n \n count++;\n }\n \n } #pragma omp parallel for num_threads(num_thread) reduction(+:weight_net_num_ph)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mcrat_io.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( filename_k, file_no_thread_num, cmd,mcdata_type,num_files, increment ) private(i,j,k)", "context_chars": 100, "text": "dset_weight, dset_weight_frame, dset_ph_type;\n \n //printf(\"Merging files in %s\\n\", dir);\n //// i < last frame because calculation before this function gives last_frame as the first frame of the next process set of frames to merge files for\n \n #if COMV_SWITCH == ON && STOKES_SWITCH == ON\n {\n num_types=17;//both switches on, want to save comv and stokes\n } #pragma omp parallel for num_threads(num_thread) firstprivate( filename_k, file_no_thread_num, cmd,mcdata_type,num_files, increment ) private(i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": "xs0;\n\n rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *));\n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, ph_r, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker, ph_p_comv, el_p, ph_p, fluid_beta) private(i) shared(min_mfp ) reduction(+:num_photons_find_new_element)", "context_chars": 100, "text": "mes the min (max) photon radius\n //or just parallelize this part here\n \n min_mfp=1e12;\n for (i=0;inearest_block_index), ((ph+i)->weight));\n //fflush(fPtr);\n \n if (find_nearest_block_switch==0)\n {\n ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault here\n }\n else\n {\n ph_block_index=0; // therefore if starting a new frame set index=0 to avoid this issue\n }\n \n mcratCoordinateToHydroCoordinate(&photon_hydro_coord, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2);//convert the photons coordinate to the hydro sim coordinate system\n \n //printf(\"ph_x:%e, ph_y:%e\\n\", ph_x, ph_y);\n \n //if the location of the photon is inside the domain of the hydro simulation then do all of this, otherwise assign huge mfp value so no scattering occurs and the next frame is loaded\n // absorbed photons have ph_block_index=-1, therefore if this value is not less than 0, calulate the mfp properly but doesnt work when go to new frame and find new indexes (will change b/c will get rid of these photons when printing)\n //alternatively make decision based on 0 weight\n #if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE\n if (((photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) &&\n 
(photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) &&\n (photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) &&\n (photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) ) //can use sorted index to see which photons have been absorbed efficiently before printing and get the indexes\n #else\n if (((photon_hydro_coord[2]<(hydro_data->r2_domain)[1]) &&\n (photon_hydro_coord[2]>(hydro_data->r2_domain)[0]) &&\n (photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) &&\n (photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) &&\n (photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) &&\n (photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) )\n \n {\n\n is_in_block=checkInBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, ph_block_index);\n \n //when rebinning photons can have comoving 4 momenta=0 and nearest_block_index=0 (and block 0 be the actual block the photon is in making it not refind the proper index and reclaulate the comoving 4 momenta) which can make counting synch scattered photons be thrown off, thus take care of this case by forcing the function to recalc things\n #if CYCLOSYNCHROTRON_SWITCH == ON\n if ((ph_block_index==0) && ( ((ph+i)->comv_p0)+((ph+i)->comv_p1)+((ph+i)->comv_p2)+((ph+i)->comv_p3) == 0 ) )\n {\n is_in_block=0; //say that photon is not in the block, force it to recompute things\n }\n \n \n if (find_nearest_block_switch==0 && is_in_block)\n {\n //keep the saved grid index\n min_index=ph_block_index;\n }\n else\n {\n //find the new index of the block closest to the photon\n //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh\n \n //find the new index of the block that the photon is actually in\n min_index=findContainingBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, fPtr); //(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr);\n \n if (min_index != -1)\n {\n (ph+i)->nearest_block_index=min_index; //save the index if min_index != -1\n \n //also recalculate the photons' comoving frequency in this new fluid element\n ph_p[0]=((ph+i)->p0);\n ph_p[1]=((ph+i)->p1);\n ph_p[2]=((ph+i)->p2);\n ph_p[3]=((ph+i)->p3);\n \n #if DIMENSIONS == THREE\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);\n #elif DIMENSIONS == TWO_POINT_FIVE\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);\n #else\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);\n \n\n \n lorentzBoost(&fluid_beta, &ph_p, &ph_p_comv, 'p', fPtr);\n \n ((ph+i)->comv_p0)=ph_p_comv[0];\n ((ph+i)->comv_p1)=ph_p_comv[1];\n ((ph+i)->comv_p2)=ph_p_comv[2];\n ((ph+i)->comv_p3)=ph_p_comv[3];\n \n num_photons_find_new_element+=1;\n }\n else\n {\n \tfprintf(fPtr, \"Photon number %d FLASH index not found, making sure it doesnt scatter.\\n\", i);\n }\n \n 
}\n \n //if min_index!= -1 (know which fluid element photon is in) do all this stuff, otherwise make sure photon doesnt scatter\n if (min_index != -1)\n {\n //fprintf(fPtr,\"Min Index: %d\\n\", min_index);\n \n //save values\n (n_dens_lab_tmp)= (hydro_data->dens_lab)[min_index];//(*(dens_lab+min_index));\n (n_temp_tmp)= (hydro_data->temp)[min_index];//(*(temp+min_index));\n \n #if DIMENSIONS == THREE\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);\n #elif DIMENSIONS == TWO_POINT_FIVE\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);\n #else\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim\n hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);\n \n \n fl_v_x=fluid_beta[0];\n fl_v_y=fluid_beta[1];\n fl_v_z=fluid_beta[2];\n \n fl_v_norm=sqrt(fl_v_x*fl_v_x+fl_v_y*fl_v_y+fl_v_z*fl_v_z);\n ph_v_norm=sqrt(((ph+i)->p1)*((ph+i)->p1)+((ph+i)->p2)*((ph+i)->p2)+((ph+i)->p3)*((ph+i)->p3));\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product\n n_cosangle=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n beta=sqrt(1.0-1.0/((hydro_data->gamma)[min_index]*(hydro_data->gamma)[min_index]));\n \n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n #if defined(_OPENMP)\n thread_id=omp_get_thread_num();\n \n \n rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]);\n //printf(\"Rnd_tracker: %e Thread number %d \\n\",rnd_tracker, omp_get_thread_num() );\n \n //mfp=(-1)*log(rnd_tracker)*(M_P/((n_dens_tmp))/(THOM_X_SECT)); ///(1.0-beta*((n_cosangle)))) ; // the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths IN COMOV FRAME for reference\n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*n_cosangle))*log(rnd_tracker) ;\n \n \n }\n else\n {\n mfp=min_mfp;\n }\n }\n else\n {\n mfp=min_mfp;\n //fprintf(fPtr,\"Photon %d In ELSE\\n\", i);\n //exit(0);\n }\n \n *(all_time_steps+i)=mfp/C_LIGHT;\n //fprintf(fPtr,\"Photon %d has time %e\\n\", i, *(all_time_steps+i));\n //fflush(fPtr);\n \n } #pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, ph_r, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker, ph_p_comv, el_p, ph_p, fluid_beta) private(i) shared(min_mfp ) reduction(+:num_photons_find_new_element)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(nt)", "context_chars": 100, "text": " rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); \n rng[0]=rand;\n\n //for(i=1;i #pragma omp parallel for 
num_threads(nt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )", "context_chars": 100, "text": "mes the min (max) photon radius\n //or just parallelize this part here\n \n min_mfp=1e12;\n for (i=0;ir0), ((ph+i)->r1));\n if (find_nearest_block_switch==0)\n {\n ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault\n }\n else\n {\n ph_block_index=0; //if starting a new frame set index=0 to avoid this issue\n }\n \n //if (strcmp(DIM_SWITCH, dim_2d_str)==0)\n #if DIMENSIONS == 2\n {\n ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate\n ph_y=((ph+i)->r2);\n ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));\n \n }\n #else\n {\n ph_x=((ph+i)->r0);\n ph_y=((ph+i)->r1);\n ph_z=((ph+i)->r2);\n \n }\n \n //printf(\"ph_x:%e, ph_y:%e\\n\", ph_x, ph_y);\n \n is_in_block=checkInBlock(ph_block_index, ph_x, ph_y, ph_z, x, y, z, szx, szy);\n \n if (find_nearest_block_switch==0 && is_in_block)\n {\n //keep the saved grid index\n min_index=ph_block_index;\n }\n else\n {\n //find the new index of the block closest to the photon\n //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh\n \n //find the new index of the block that the photon is actually in\n //min_index=findContainingBlock(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr);\n \n (ph+i)->nearest_block_index=min_index; //save the index\n \n }\n \n //look for the blocks surounding the block of interest and order them by the \n left_dist_min=1e15;//set dist to impossible value to make sure at least first distance calulated is saved\n right_dist_min=1e15;\n top_dist_min=1e15;\n bottom_dist_min=1e15;\n for (j=0;j(*(x+min_index)) && (dist < right_dist_min))\n {\n right_block_index=j;\n right_dist_min=dist;\n }\n \n if ((*(y+j))<(*(y+min_index)) && (dist < bottom_dist_min) )\n {\n bottom_block_index=j;\n bottom_dist_min=dist;\n }\n else if ((*(y+j))>(*(y+min_index)) && (dist < top_dist_min) )\n {\n top_block_index=j;\n top_dist_min=dist;\n }\n \n }\n all_adjacent_block_indexes[0]=left_block_index;\n all_adjacent_block_indexes[1]=right_block_index;\n all_adjacent_block_indexes[2]=bottom_block_index;\n all_adjacent_block_indexes[3]=top_block_index; \n \n //do a weighted average of the 4 nearest grids based on volume\n v=0;\n (n_dens_lab_tmp)=0;\n (n_vx_tmp)= 0;\n (n_vy_tmp)= 0;\n (n_temp_tmp)= 0;\n (n_vz_tmp)= 0;\n \n for (j=0;j<4;j++)\n {\n \n #if SIM_SWITCH == RIKEN\n {\n r=pow(pow((*(x+all_adjacent_block_indexes[j])),2.0)+pow((*(y+all_adjacent_block_indexes[j])),2.0), 0.5);\n theta=atan2((*(x+all_adjacent_block_indexes[j])), (*(y+all_adjacent_block_indexes[j])));\n dv=2.0*M_PI*pow(r,2)*sin(theta)*(*(szx+all_adjacent_block_indexes[j]))*(*(szy+all_adjacent_block_indexes[j])) ;\n }\n #else\n {\n //using FLASH\n 
dv=2.0*M_PI*(*(x+all_adjacent_block_indexes[j]))*pow(*(szx+all_adjacent_block_indexes[j]),2.0) ;\n\n }\n \n \n v+=dv;\n \n //save values\n (n_dens_lab_tmp)+= (*(dens_lab+all_adjacent_block_indexes[j]))*dv;\n (n_vx_tmp)+= (*(velx+all_adjacent_block_indexes[j]))*dv;\n (n_vy_tmp)+= (*(vely+all_adjacent_block_indexes[j]))*dv;\n (n_temp_tmp)+= (*(temp+all_adjacent_block_indexes[j]))*dv;\n \n //if (strcmp(DIM_SWITCH, dim_3d_str)==0)\n #if DIMENSIONS == 3\n {\n (n_vz_tmp)+= (*(velz+all_adjacent_block_indexes[j]))*dv;\n }\n \n \n }\n \n\n //fprintf(fPtr,\"Outside\\n\");\n \n //save values\n (n_dens_lab_tmp)/= v;\n (n_vx_tmp)/= v;\n (n_vy_tmp)/= v;\n (n_temp_tmp)/= v;\n //if (strcmp(DIM_SWITCH, dim_3d_str)==0)\n #if DIMENSIONS == 3\n {\n (n_vz_tmp)/= v;\n }\n \n \n //if (strcmp(DIM_SWITCH, dim_2d_str)==0)\n #if DIMENSIONS == 2\n {\n fl_v_x=n_vx_tmp*cos(ph_phi);\n fl_v_y=n_vx_tmp*sin(ph_phi);\n fl_v_z=n_vy_tmp;\n }\n #else\n {\n fl_v_x=n_vx_tmp;\n fl_v_y=n_vy_tmp;\n fl_v_z=n_vz_tmp;\n }\n \n \n fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5);\n ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);\n \n //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product\n (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined\n \n //if (strcmp(DIM_SWITCH, dim_2d_str)==0)\n #if DIMENSIONS == 2\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);\n }\n #else\n {\n beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5);\n }\n \n //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case\n rnd_tracker=0;\n #if defined(_OPENMP)\n thread_id=omp_get_thread_num();\n \n \n rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]);\n \n mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths \n \n \n #pragma omp critical \n if ( mfp #pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0)", "context_chars": 100, "text": "get_num_threads();\n #endif\n double old_position=0, new_position=0, divide_p0=0;\n \n \n for (i=0;itype != CS_POOL_PHOTON) && ((ph+i)->weight != 0))\n {\n old_position= sqrt(((ph+i)->r0)*((ph+i)->r0)+((ph+i)->r1)*((ph+i)->r1)+((ph+i)->r2)*((ph+i)->r2)); //uncommented checks since they were not necessary anymore\n \n divide_p0=1.0/((ph+i)->p0);\n \n ((ph+i)->r0)+=((ph+i)->p1)*divide_p0*C_LIGHT*t; //update x position\n \n ((ph+i)->r1)+=((ph+i)->p2)*divide_p0*C_LIGHT*t;//update y\n \n ((ph+i)->r2)+=((ph+i)->p3)*divide_p0*C_LIGHT*t;//update z\n \n new_position= 
sqrt(((ph+i)->r0)*((ph+i)->r0)+((ph+i)->r1)*((ph+i)->r1)+((ph+i)->r2)*((ph+i)->r2));\n /*\n if ((new_position-old_position)/t > C_LIGHT)\n {\n fprintf(fPtr, \"PHOTON NUMBER %d IS SUPERLUMINAL. ITS SPEED IS %e c.\\n\", i, ((new_position-old_position)/t)/C_LIGHT);\n }\n */\n //if ( (ph+i)->s0 != 1)\n {\n //\tfprintf(fPtr, \"PHOTON NUMBER %d DOES NOT HAVE I=1. Instead it is: %e\\n\", i, (ph+i)->s0);\n }\n \n //printf(\"In update function: %e, %e, %e, %e, %e, %e, %e\\n\",((ph+i)->r0), ((ph+i)->r1), ((ph+i)->r2), t, ((ph+i)->p1)/((ph+i)->p0), ((ph+i)->p2)/((ph+i)->p0), ((ph+i)->p3)/((ph+i)->p0) );\n }\n } #pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:e_sum) reduction(+:w_sum)", "context_chars": 100, "text": "_OPENMP)\n int num_thread=omp_get_num_threads();\n #endif\n double e_sum=0, w_sum=0;\n \n for (i=0;iweight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons\n \n {\n e_sum+=(((ph+i)->p0)*((ph+i)->weight));\n w_sum+=((ph+i)->weight);\n }\n } #pragma omp parallel for reduction(+:e_sum) reduction(+:w_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) reduction(min:temp_min) reduction(max:temp_max) reduction(+:sum) reduction(+:avg_r_sum) reduction(+:count)", "context_chars": 100, "text": "um_synch=0, avg_r_sum_comp=0, avg_r_sum_inject=0;\n \n //printf(\"Num threads: %d\", num_thread);\nfor (i=0;iweight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons\n \n {\n sum+=((ph+i)->num_scatt);\n avg_r_sum+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));\n \n //printf(\"%d %c %e %e %e %e %e %e\\n\", i, (ph+i)->type, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt);\n \n if (((ph+i)->num_scatt) > temp_max )\n {\n temp_max=((ph+i)->num_scatt);\n //printf(\"The new max is: %d\\n\", temp_max);\n }\n \n //if ((i==0) || (((ph+i)->num_scatt)num_scatt)num_scatt);\n //printf(\"The new min is: %d\\n\", temp_min);\n }\n \n if (((ph+i)->type) == INJECTED_PHOTON )\n {\n avg_r_sum_inject+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));\n count_i++;\n }\n \n #if CYCLOSYNCHROTRON_SWITCH == ON\n if ((((ph+i)->type) == COMPTONIZED_PHOTON) || (((ph+i)->type) == UNABSORBED_CS_PHOTON))\n {\n avg_r_sum_comp+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));\n count_comp++;\n }\n \n \n count++;\n }\n \n #if CYCLOSYNCHROTRON_SWITCH == ON\n if (((ph+i)->type) == CS_POOL_PHOTON )\n {\n avg_r_sum_synch+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));\n count_synch++;\n }\n \n \n } #pragma omp parallel for num_threads(num_thread) reduction(min:temp_min) reduction(max:temp_max) reduction(+:sum) reduction(+:avg_r_sum) reduction(+:count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/lazzati-astro/MCRaT/Src/mclib.c", "omp_pragma_line": "#pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)", "context_chars": 100, "text": "d(_OPENMP)\n int num_thread=omp_get_num_threads();\n 
#endif\n double ph_r=0, ph_theta=0;\n \nfor (i=0;iweight != 0)\n {\n ph_r=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));\n ph_theta=acos(((ph+i)->r2) /ph_r); //this is the photons theta psition in the FLASH grid, gives in radians\n if (ph_r > temp_r_max )\n {\n temp_r_max=ph_r;\n //fprintf(fPtr, \"The new max is: %e from photon %d with x: %e y: %e z: %e\\n\", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);\n }\n \n //if ((i==0) || (ph_rr0), (ph+i)->r1, (ph+i)->r2);\n }\n \n if (ph_theta > temp_theta_max )\n {\n temp_theta_max=ph_theta;\n //fprintf(fPtr, \"The new max is: %e from photon %d with x: %e y: %e z: %e\\n\", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);\n }\n \n //if ((i==0) || (ph_rr0), (ph+i)->r1, (ph+i)->r2);\n }\n }\n } #pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks-fortran/utilities/fpolybench.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " double* flush = (double*) calloc (cs, sizeof(double));\n int i;\n double tmp = 0.0;\n#ifdef _OPENMP\nfor (i = 0; i < cs; i++)\n tmp += flush[i];\n assert (tmp <= 10.0);\n free (flush);\n}\n\n\n#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER\nvoid polybench_linux_fifo_scheduler()\n{\n /* Use FIFO scheduler to limit OS interference. Program must be run\n as root, and this works only for Linux kernels. */\n struct sched_param schedParam;\n schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);\n sched_setscheduler (0, SCHED_FIFO, &schedParam);\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " 0],double B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n if (n >= 1) {\nfor (c1 = 0; c1 <= n + -1; c1++) {\n for (c2 = 0; c2 <= n + -1; c2++) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c8)", "context_chars": 100, "text": "int i2;\n \n //#pragma scop\n{\n int c0;\n int c2;\n int c8;\n for (c0 = 0; c0 <= 9; c0++) {\nfor (c2 = 0; c2 <= 499; c2++) {\n for (c8 = 1; c8 <= 499; c8++) {\n B[c2][c8] = B[c2][c8] - A[c2][c8] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c2][c8] = X[c2][c8] - X[c2][c8 - 1] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[c2][500 - c8 - 2] = (X[c2][500 - 2 - c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n } #pragma omp parallel for private(c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "- 2 - c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n }\nfor (c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n } #pragma omp parallel 
for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c8)", "context_chars": 100, "text": " for (c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n }\nfor (c2 = 0; c2 <= 499; c2++) {\n for (c8 = 1; c8 <= 499; c8++) {\n B[c8][c2] = B[c8][c2] - A[c8][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c8][c2] = X[c8][c2] - X[c8 - 1][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[500 - 2 - c8][c2] = (X[500 - 2 - c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n } #pragma omp parallel for private(c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "- 2 - c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n }\nfor (c2 = 0; c2 <= 499; c2++) {\n X[500 - 1][c2] = X[500 - 1][c2] / B[500 - 1][c2];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB195-diffusion1-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") / RAND_MAX;\n u1[0] = u1[n - 1] = u2[0] = u2[n - 1] = 0.5;\n for (int t = 0; t < nsteps; t++)\n {\nfor (int i = 1; i < n - 1; i++)\n {\n u2[i] = u1[i] + c * (u1[i - 1] + u1[i + 1] - 2 * u1[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j)", "context_chars": 100, "text": "oat u[100][100];\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\nfor (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i(argv[1]);\n\n int input[inLen]; \n int output[inLen];\n for (i=0; ifor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " x[len]; \n\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\nfor (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j)", "context_chars": 100, "text": "loat u[len][len];\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\nfor (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c3, c4, c2)", "context_chars": 
100, "text": "0 + 0])\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n int c4;\n int c3;\n if (n >= 1) {\nfor (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c3, c4, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5, c4, c2, c3)", "context_chars": 100, "text": "16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {\nfor (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {\n for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * 
tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {\n if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {\n if ((n + 1) % 2 == 0) {\n for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {\n A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];\n }\n }\n }\n if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {\n if ((n + 1) % 2 == 0) {\n for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {\n A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];\n }\n }\n }\n for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 
2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {\n if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);\n }\n }\n for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {\n if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);\n }\n for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);\n A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];\n }\n if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];\n }\n }\n if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {\n A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];\n }\n }\n }\n if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {\n for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {\n B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);\n }\n }\n if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {\n for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {\n B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] 
+ A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);\n }\n }\n }\n } #pragma omp parallel for private(c5, c4, c2, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB171-threadprivate3-orig-no.c", "omp_pragma_line": "#pragma omp parallel for default(shared)", "context_chars": 100, "text": "\n\nstatic double x[20];\n#pragma omp threadprivate(x)\n\nint main(){\n int i;\n double j,k;\n\n for (i = 0; i < 20; i++){\n x[i] = -1.0;\n if(omp_get_thread_num()==0){\n j = x[0];\n }\n if(omp_get_thread_num()==0){\n k = i+0.05;\n }\n } #pragma omp parallel for default(shared)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int i;\n int j;\n double a[20][20];\n memset(a,0,(sizeof(a)));\n\n for (i = 0; i < 20 -1; i += 1) {\nfor (j = 0; j < 20; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB197-diffusion2-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0] = u[0][n - 1] = u[1][0] = u[1][n - 1] = 0.5;\n int p = 0;\n for (int t = 0; t < nsteps; t++)\n {\nfor (int i = 1; i < n - 1; i++)\n {\n u[1 - p][i] = u[p][i] + c * (u[p][i - 1] + u[p][i + 1] - 2 * u[p][i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB112-linear-orig-no.c", "omp_pragma_line": "#pragma omp parallel for linear(j)", "context_chars": 100, "text": "for (i=0;i #pragma omp parallel for linear(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB199-prodcons-yes.c", "omp_pragma_line": "#pragma omp parallel for shared(size, cap, nprod, ncons, nthread) firstprivate(packages) num_threads(nthread)", "context_chars": 100, "text": "= 4, ncons = 4;\nint cap = 5, size = 0, packages = 1000;\nint main()\n{\n int nthread = nprod + ncons;\nfor (int i = 0; i < nthread; i++)\n {\n if (i < nprod)\n while (packages)\n { // I am a producer\n#pragma omp critical(A)\n if (size < cap)\n {\n size++; // produce\n packages--; // produced a package\n printf(\"Producer %d produced! size=%d\\n\", i, size);\n fflush(stdout);\n }\n }\n else\n while (packages)\n { // I am a consumer\n#pragma omp critical(B)\n if (size > 0)\n {\n size--; // consume\n packages--; // consumed a package\n printf(\"Consumer %d consumed! size=%d\\n\", i - nprod, size);\n fflush(stdout);\n }\n }\n } #pragma omp parallel for shared(size, cap, nprod, ncons, nthread) firstprivate(packages) num_threads(nthread)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB016-outputdep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "74:5:W vs. x@74:5:W\n*/\n\n#include \nint a[100];\n\nint main()\n{\n int len=100; \n int i,x=10;\n\nfor (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB109-orderedmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ordered ", "context_chars": 100, "text": " Missing the ordered clause\n * Data race pair: x@56:5:W vs. 
x@56:5:W\n * */\nint main()\n{\n int x =0;\nfor (int i = 0; i < 100; ++i) {\n x++;\n } #pragma omp parallel for ordered "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB194-diffusion1-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ") / RAND_MAX;\n u1[0] = u1[n - 1] = u2[0] = u2[n - 1] = 0.5;\n for (int t = 0; t < nsteps; t++)\n {\nfor (int i = 1; i < n - 1; i++)\n {\n u2[i] = u1[i] + c * (u1[i - 1] + u1[i + 1] - 2 * u1[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB196-diffusion2-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0] = u[0][n - 1] = u[1][0] = u[1][n - 1] = 0.5;\n int p = 0;\n for (int t = 0; t < nsteps; t++)\n {\nfor (int i = 1; i < n - 1; i++)\n {\n u[1 - p][i] = u[p][i] + c * (u[p][i - 1] + u[p][i + 1] - 2 * u[p][i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB110-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ordered ", "context_chars": 100, "text": " Univ.\n * Proper user of ordered directive and clause, no data races\n * */\nint main()\n{\n int x =0;\nfor (int i = 0; i < 100; ++i) {\n#pragma omp ordered\n {\n x++;\n }\n } #pragma omp parallel for ordered "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "arallelized so no race condition.\n*/\nint n=100, m=100;\ndouble b[100][100];\n\nvoid foo()\n{\n int i,j;\nfor (i=0;i #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB017-outputdep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ar* argv[])\n{\n int len=100; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len];\n int i,x=10;\n\nfor (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB059-lastprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) lastprivate (x)", "context_chars": 100, "text": "ue back to the shared one within the last iteration.\n*/\n#include \n\nvoid foo()\n{\n int i,x;\nfor (i=0;i<100;i++)\n x=i;\n printf(\"x=%d\",x);\n}\n\nint main()\n{\n foo();\n return 0;\n} #pragma omp parallel for private (i) lastprivate (x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c4, c2, c3)", "context_chars": 100, "text": "0 + 0])\n{\n //int i;\n //int j;\n{\n int c1;\n int c3;\n int c2;\n int c4;\n if (n >= 1) {\nfor (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) 
/ 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c4, c2, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c15, c9, c8)", "context_chars": 100, "text": " if (n >= 1 && tsteps >= 1) {\n for (c0 = 0; c0 <= tsteps + -1; c0++) {\n if (n >= 2) {\nfor (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];\n }\n }\n }\n } #pragma omp parallel for private(c15, c9, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c15)", "context_chars": 100, "text": "9 - 3]) / B[c15][n - 3 - c9];\n }\n }\n }\n }\n }\nfor (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n } #pragma omp parallel for private(c15)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c15, c9, c8)", "context_chars": 100, "text": " X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n }\n if (n >= 2) {\nfor (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 
16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];\n }\n }\n }\n } #pragma omp parallel for private(c15, c9, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c15)", "context_chars": 100, "text": "][c15]) / B[n - 2 - c9][c15];\n }\n }\n }\n }\n }\nfor (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n#pragma omp simd\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];\n }\n } #pragma omp parallel for private(c15)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB067-restrictpointer1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (length)", "context_chars": 100, "text": "f double real8;\n\nvoid foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)\n{\n int i;\n\nfor (i = 0; i <= length - 1; i += 1) {\n newSxx[i] = 0.0;\n newSyy[i] = 0.0;\n } #pragma omp parallel for private (i) firstprivate (length)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n{\n int i ;\n int inLen=1000 ; \n int outLen = 0;\n\n for (i=0; ifor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB035-truedepscalar-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int main(int argc, char* argv[])\n{ \n int i; \n int tmp;\n tmp = 10;\n int len=100;\n\n int a[100];\n\nfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "open(\"mytempfile.txt\",\"a+\");\n if (pfile ==NULL)\n {\n fprintf(stderr,\"Error in fopen()\\n\");\n }\n\nfor (i=0; i #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "id foo (double* a, double* b, int N)\n{\n int i; \n#pragma omp target map(to:a[0:N]) map(from:b[0:N])\nfor (i=0;i< N ;i++)\n b[i]=a[i]*(double)i;\n}\n\nint main(int argc, char* argv[])\n{\n int i;\n int len = 1000;\n double a[len], b[len];\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB066-pointernoaliasing-orig-no.c", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "le* ) malloc (sizeof (double) * N );\n double * m_nvol = (double* ) malloc (sizeof (double) * N );\n\nfor (int i=0; i < N; ++i ) \n { \n m_pdv_sum[ i ] = 0.0;\n m_nvol[ i ] = i*2.5;\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "op parallelization\n*/\n#define N 100\n\ndouble a[N][N],v[N],v_out[N];\nint mv()\n{ \n int i,j;\nfor (i = 0; i < N; i++)\n { \n float sum = 0.0;\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } \n v_out[i] = sum;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ordered(2)", "context_chars": 100, "text": "s needed to compile this test.\"\n#endif\n#include \nint a[100][100];\nint main()\n{\n int i, j;\nfor (i = 0; i < 100; i++)\n for (j = 0; j < 100; j++)\n {\n a[i][j] = a[i][j] + 1;\n#pragma omp ordered depend(sink:i-1,j) depend (sink:i,j-1)\n printf (\"test i=%d j=%d\\n\",i,j);\n#pragma omp ordered depend(source)\n } #pragma omp parallel for ordered(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " 0],double B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (n >= 1) {\nfor (c1 = 0; c1 <= n + -1; c1++) {\n for (c2 = 0; c2 <= n + -1; c2++) {\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "[(-2 * c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);\n }\n }\n }\nfor (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {\n B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + 
A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);\n for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {\n A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];\n B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);\n }\n A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB036-truedepscalar-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt i; \n int tmp;\n tmp = 10;\n int len=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len];\nfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pi) private(x)", "context_chars": 100, "text": "uble pi = 0.0;\n long int i;\n double x, interval_width;\n interval_width = 1.0/(double)num_steps;\n\nfor (i = 0; i < num_steps; i++) {\n x = (i+ 0.5) * interval_width;\n pi += 1.0 / (x*x + 1.0);\n } #pragma omp parallel for reduction(+:pi) private(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt len=100;\n if (argc>1)\n len = atoi(argv[1]);\n int a[len];\n for (i=0;ifor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB179-thread-sensitivity-yes.c", "omp_pragma_line": "#pragma omp parallel for shared(A)", "context_chars": 100, "text": "ain(int argc, char *argv[]) {\n\n int *A; \n int N = 100;\n\n A = (int*) malloc(sizeof(int) * N);\n\n \nfor(int i = 0; i < N; i++) {\n A[i] = i;\n if (i == 1) \n { \n A[0] = 1; \n }\n } #pragma omp parallel for shared(A)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB170-nestedloops-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j,k,tmp1) ", "context_chars": 100, "text": "#include \n\nint main(){\n int i,j,k,m;\n double tmp1;\n\n double a[12][12][12];\n\n m = 3.0;\n\n for (i = 0; i < 12; i++) {\n for (j = 0; j < 12; j++) {\n for (k = 0; k < 12; k++) {\n tmp1 = 6.0/m;\n a[i][j][k] = tmp1+4;\n }\n }\n } #pragma omp parallel for private(j,k,tmp1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* \nArrays passed as function 
parameters\n*/\nvoid foo1(double o1[], double c[], int len)\n{ \n int i ;\nfor (i = 0; i < len; ++i) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": " xx, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n/* Initialize initial condition and RHS */\n//for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "[N];\n\nvoid mv()\n{ \n int i,j;\n for (i = 0; i < N; i++)\n { \n float sum = 0.0;\nfor (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": "1?0 : -1 * nj + -1 * nm + 1)) && nj >= 0 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {\nfor (c1 = 0; c1 <= (((((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) < (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16))?(((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) : (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16)))); c1++) {\n if (c1 <= (((((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < 
(((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : 
-((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) < nm + -1?((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)) < nm + -1?((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + 
-1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : nm); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nk > nl?nk : nl); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nk > nm?nk : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = (nj > nk?nj : nk); c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n if (c1 == c2) 
{\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n for (c3 = nj; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nl + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (nj > nm?nj : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n for (c3 = nk; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nk; c4 <= ((((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) < nm + -1?((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 
2) / nk;\n }\n }\n }\n for (c3 = (nk > nm?nk : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= nk + -1; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nk; c4 <= nm + -1; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n for (c3 = (nj > nk?nj : nk); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + 
-1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (ni > nm?ni : nm); c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n for (c3 = (ni > nj?ni : nj); c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = (ni > nk?ni : nk); c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : 
(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = 
(ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 
* c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / 
nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 
= nm; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) 
: (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 
+ 15; c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 
1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (ni > nm?ni : nm); c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) 
/ 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {\n for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 
+ 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nk > nl?nk : nl); c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nm; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * 
(c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 
16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= 
((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / 
ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : 
-((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * 
c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n 
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (ni > nj?ni : nj); c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk 
+ -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); 
c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni 
+ -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : 
-((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {\n for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 
0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : 
nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 
16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 
* c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 
15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + 
-1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + 
-1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + 
-1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nk; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {\n for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + 
-1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nk; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 
: nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {\n for (c2 = (((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : 
-((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 
<= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * 
c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 
16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n 
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 
0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 
16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : 
(nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {\n for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n if (ni >= ((0 > -1 * nj + 1?0 : -1 * nj + 1)) && nj >= 0 && nk >= 1 && nm <= -1) {\nfor (c1 = 0; c1 <= (((((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) < (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16))?(((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) : (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16)))); c1++) {\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 
1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 
< 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n if (ni >= 0 && nj <= -1 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {\nfor (c1 = 0; c1 <= (((ni + nm + -1) * 16 < 0?((16 < 0?-((-(ni + nm + -1) + 16 + 1) / 16) : -((-(ni + nm + -1) + 16 - 1) / 16))) : (ni + nm + -1) / 16)); c1++) {\n if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n#pragma omp simd\n for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n for (c3 = ni; c3 <= ((16 * c1 + 
15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {\n for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n if (nj <= -1 && nk >= 1 && nm <= -1) {\nfor (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {\n A[c3][c4] = ((double )c3) * c4 / ni;\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (ni >= 0 && nj >= 0 && nk <= -1 && nm >= 1) {\nfor (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) {\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + 
-1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (ni >= 0 && nj <= -1 && nk <= -1 && nl >= 1) {\nfor (c1 = 0; c1 <= 
(((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": "j >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {\nfor (c1 = 0; c1 <= (((((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) < (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16))?(((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) : (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16)))); c1++) {\n if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : 
(nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n if (c1 == c2) {\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n if (c1 == 
c2) {\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 
: nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + 
-1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + 
-1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n#pragma omp simd\n for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 
16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {\n for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n }\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + 
-1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 
16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {\n for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 
16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {\n for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n if (ni <= -1 && nj >= 1 && nm <= -1) {\nfor (c1 = 0; c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {\n B[c3][c4] = ((double )c3) * (c4 + 1) / nj;\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (ni <= -1 && nj <= -1 && nk >= 0 && nl >= 1) {\nfor (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (ni <= -1 && nj >= 0 && nk <= -1 && nm >= 1) {\nfor (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) {\n if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n#pragma omp simd\n for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n#pragma omp simd\n for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 
16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {\n C[c3][c4] = ((double )c3) * (c4 + 3) / nl;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {\n for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {\n for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n if (ni <= -1 && nj <= -1 && nk <= -1 && nl 
>= 1) {\nfor (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {\n#pragma omp simd\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {\n D[c3][c4] = ((double )c3) * (c4 + 2) / nk;\n }\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c2, c10)", "context_chars": 100, "text": " int c10;\n int c2;\n int c1;\n int c6;\n int c7;\n if (ni >= 0 && nj >= 0 && nl >= 1) {\nfor (c1 = 0; c1 <= (((nj + ni + -1) * 16 < 0?((16 < 0?-((-(nj + ni + -1) + 16 + 1) / 16) : -((-(nj + ni + -1) + 16 - 1) / 16))) : (nj + ni + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {\n G[c10][c7] = 0;\n }\n }\n }\n if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {\n F[c10][c7] = 0;\n }\n }\n }\n }\n } #pragma omp parallel for private(c7, c2, c10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c2, c10)", "context_chars": 100, "text": "0;\n }\n }\n }\n }\n }\n }\n if (ni <= -1 && nl >= 1) {\nfor (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {\n F[c10][c7] = 0;\n }\n }\n }\n } #pragma omp parallel for private(c7, c2, c10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c2, c10)", "context_chars": 100, "text": " F[c10][c7] = 0;\n }\n }\n }\n }\n }\n if (nj <= -1 && nl >= 1) {\nfor (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < 
ni + -1?16 * c1 + 15 : ni + -1)); c10++) {\n G[c10][c7] = 0;\n }\n }\n }\n } #pragma omp parallel for private(c7, c2, c10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c6, c2, c10, c5)", "context_chars": 100, "text": " G[c10][c7] = 0;\n }\n }\n }\n }\n }\n if (nl >= 1 && nm >= 1) {\nfor (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {\n for (c5 = 0; c5 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c5++) {\n for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nm + -1?16 * c5 + 15 : nm + -1)); c6++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {\n F[c10][c7] += C[c10][c6] * D[c6][c7];\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(c7, c6, c2, c10, c5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c2, c10)", "context_chars": 100, "text": " }\n }\n }\n }\n }\n }\n }\n if (nj >= 1) {\nfor (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {\n E[c10][c7] = 0;\n }\n }\n }\n } #pragma omp parallel for private(c7, c2, c10)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB042-3mm-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c7, c6, c2, c10, c5)", "context_chars": 100, "text": "\n E[c10][c7] = 0;\n }\n }\n }\n }\n }\n if (nj >= 1) {\nfor (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {\n for (c5 = 0; c5 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c5++) {\n for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nk + -1?16 * c5 + 15 : nk + -1)); c6++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {\n E[c10][c7] += A[c10][c6] * B[c6][c7];\n }\n }\n }\n }\n for (c5 = 0; c5 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c5++) {\n for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nl + -1?16 * c5 + 15 : nl + -1)); c6++) {\n for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {\n#pragma omp simd\n for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni 
+ -1)); c10++) {\n G[c10][c6] += E[c10][c7] * F[c7][c6];\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(c7, c6, c2, c10, c5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "for (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "char* argv[])\n{\n int i;\n int tmp;\n int len=100;\n int a[100];\n for (i=0;ifor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for firstprivate (g)", "context_chars": 100, "text": "OF SUCH DAMAGE.\n*/\n\n\n/*\nExample use of firstprivate()\n*/\nvoid foo(int * a, int n, int g)\n{\n int i;\nfor (i=0;i #pragma omp parallel for firstprivate (g)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB155-missingordered-orig-gpu-no.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": " for(int i=0; ifor (int i=1; i #pragma omp parallel for ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "ost loop can be parallelized. \n \n*/\nint n=100, m=100;\ndouble b[100][100];\n\nvoid foo()\n{\n int i,j;\nfor (i=0;i #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB178-input-dependence-var-yes.c", "omp_pragma_line": "#pragma omp parallel for shared(A)", "context_chars": 100, "text": "(argc>1)\n N = atoi(argv[1]);\n\n A = (int*) malloc(sizeof(int) * N);\n\n load_from_input(A, N);\n \nfor(int i = 0; i < N; i++) {\n A[i] = i;\n if (N > 10000) \n { \n A[0] = 1; \n }\n } #pragma omp parallel for shared(A)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "0],double D[128 + 0][128 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (nl >= 1) {\nfor (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 
3) / nl;\n }\n for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\nfor (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double 
)c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\nfor (c1 = nm; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\nfor (c1 = 0; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\nfor (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\nfor (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj >= 1) {\nfor (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\nfor (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj <= 0 && nl <= 0) {\nfor (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0) {\nfor (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\nfor (c1 = nk; c1 <= ((((ni + -1 < nj + 
-1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\nfor (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) {\nfor (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm <= 0) {\nfor (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\nfor (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\nfor (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1) {\nfor (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nl >= 1) {\nfor (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\nfor (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\nfor (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\nfor (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\nfor (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\nfor (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1) {\nfor (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\nfor (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; 
c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\nfor (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) {\nfor (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\nfor (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl <= 0) {\nfor (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nm >= 1) {\nfor (c1 = nm; c1 <= nj + -1; c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\nfor (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\nfor (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\nfor (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " + 0])\n{\n //int i;\n //int j;\n //int k;\n \n //#pragma scop\n{\n int c1;\n int c2;\n int c5;\nfor (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5, c2)", "context_chars": 100, "text": "{\n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n }\nfor (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n } #pragma omp parallel for private(c5, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n }\nfor (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5, c2)", "context_chars": 100, "text": " = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n }\nfor (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n }\n }\n } #pragma omp parallel for private(c5, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "0; \n\n double a[20][20];\n\n for (i=0; i< len; i++)\n for (j=0; jfor (i = 0; i < len - 1; i += 1) {\n for (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1)", "context_chars": 100, "text": "[i]=0.5*i;\n }\n// default static even scheduling may not trigger data race, using static,1 instead.\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0 + i;\n xa2[idx]+= 3.0 + i;\n } #pragma omp parallel for schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j,k)", "context_chars": 100, "text": " \n#define K 100\ndouble a[N][M],b[M][K],c[N][K];\n \nint mmm() \n{ \n int i,j,k;\nfor (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++)\n c[i][j]= c[i][j]+a[i][k]*b[k][j];\n return 0; \n} \n\nint main()\n{\n mmm();\n return 0;\n} #pragma omp parallel for private(j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ialize x[]\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\nfor (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "base;\n double * xa2 = base + 12;\n int i;\n\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.0;\n }\n\nfor (i =0; i< N; ++i) // this level of loop has no loop carried dependence\n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "clude \n\nvoid foo(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\nfor (i = 0; i < n; i++)\n a[i] = b[i] + c[i]; \n}\n\nint main()\n{\n int n = 1000;\n int * a , *b, *c;\n\n a = (int*) malloc (n* sizeof (int));\n if (a ==0)\n {\n fprintf (stderr, \"skip the execution due to malloc failures.\\n\");\n return 1;\n }\n\n b = (int*) malloc (n* sizeof (int));\n if (b ==0)\n {\n fprintf (stderr, \"skip the execution due to malloc failures.\\n\");\n return 1;\n }\n\n c = (int*) malloc (n* sizeof (int));\n if (c ==0)\n {\n fprintf (stderr, \"skip the execution due to malloc failures.\\n\");\n return 1;\n }\n\n foo (n, a, b,c);\n\n free (a);\n free (b);\n free (c);\n return 0;\n} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB198-prodcons-no.c", "omp_pragma_line": "#pragma omp parallel for shared(size, cap, nprod, ncons, nthread) 
firstprivate(packages) num_threads(nthread)", "context_chars": 100, "text": "= 4, ncons = 4;\nint cap = 5, size = 0, packages = 1000;\nint main()\n{\n int nthread = nprod + ncons;\nfor (int i = 0; i < nthread; i++)\n {\n if (i < nprod)\n while (packages)\n { // I am a producer\n#pragma omp critical\n if (size < cap)\n {\n size++; // produce\n packages--; // produced a package\n printf(\"Producer %d produced! size=%d\\n\", i, size);\n fflush(stdout);\n }\n }\n else\n while (packages)\n { // I am a consumer\n#pragma omp critical\n if (size > 0)\n {\n size--; // consume\n packages--; // consumed a package\n printf(\"Consumer %d consumed! size=%d\\n\", i - nprod, size);\n fflush(stdout);\n }\n }\n } #pragma omp parallel for shared(size, cap, nprod, ncons, nthread) firstprivate(packages) num_threads(nthread)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": " xx, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n /* Initialize initial condition and RHS */\nfor (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": ";\n\n double a[len][len];\n\n for (i=0; i< len; i++)\n for (j=0; jfor (i = 0; i < len - 1; i += 1) {\n for (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/DRB169-missingsyncwrite-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j,k)", "context_chars": 100, "text": "(j = 0; j < N; j++) {\n for (k = 0; k < N; k++) {\n r[i][j][k] = i;\n }\n }\n }\n\n\n for (i = 1; i < N-1; i++) {\n for (j = 1; j < N-1; j++) {\n for (k = 0; k < N; k++) {\n r1[k] = r[i][j-1][k] + r[i][j+1][k] + r[i-1][j][k] + r[i+1][j][k];\n }\n }\n } #pragma omp parallel for default(shared) private(j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/dataracebench/micro-benchmarks/utilities/polybench.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp)", "context_chars": 100, "text": " double* flush = (double*) calloc (cs, sizeof(double));\n int i;\n double tmp = 0.0;\n#ifdef _OPENMP\nfor (i = 0; i < cs; i++)\n tmp += flush[i];\n assert (tmp <= 10.0);\n free (flush);\n}\n\n\n#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER\nvoid polybench_linux_fifo_scheduler()\n{\n /* Use FIFO scheduler to limit OS interference. Program must be run\n as root, and this works only for Linux kernels. 
*/\n struct sched_param schedParam;\n schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);\n sched_setscheduler (0, SCHED_FIFO, &schedParam);\n} #pragma omp parallel for reduction(+:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvodes/C_openmp/cvsAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, \\", "context_chars": 100, "text": "= data->hdcoef;\n horac = data->hacoef;\n verdc = data->vdcoef;\n\n /* Loop over all grid points. */\nurt, hdiff, hadv, vdiff) \\\n num_threads(data->nthreads)\n for (j = 1; j <= MY; j++)\n {\n for (i = 1; i <= MX; i++)\n {\n /* Extract u at x_i, y_j and four neighboring points */\n\n uij = IJth(udata, i, j);\n udn = (j == 1) ? ZERO : IJth(udata, i, j - 1);\n uup = (j == MY) ? ZERO : IJth(udata, i, j + 1);\n ult = (i == 1) ? ZERO : IJth(udata, i - 1, j);\n urt = (i == MX) ? ZERO : IJth(udata, i + 1, j);\n\n /* Set diffusion and advection terms and load into udot */\n\n hdiff = hordc * (ult - TWO * uij + urt);\n hadv = horac * (urt - ult);\n vdiff = verdc * (uup - TWO * uij + udn);\n IJth(dudata, i, j) = hdiff + hadv + vdiff;\n }\n } #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvodes/C_openmp/cvsAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) \\", "context_chars": 100, "text": "ta = (UserData)user_data;\n hordc = data->hdcoef;\n horac = data->hacoef;\n verdc = data->vdcoef;\n\nnum_threads(data->nthreads)\n for (j = 1; j <= MY; j++)\n {\n for (i = 1; i <= MX; i++)\n {\n k = j - 1 + (i - 1) * MY;\n kthCol = SUNBandMatrix_Column(J, k);\n\n /* set the kth column of J */\n\n SM_COLUMN_ELEMENT_B(kthCol, k, k) = -TWO * (verdc + hordc);\n if (i != 1) { SM_COLUMN_ELEMENT_B(kthCol, k - MY, k) = hordc + horac; }\n if (i != MX) { SM_COLUMN_ELEMENT_B(kthCol, k + MY, k) = hordc - horac; }\n if (j != 1) { SM_COLUMN_ELEMENT_B(kthCol, k - 1, k) = verdc; }\n if (j != MY) { SM_COLUMN_ELEMENT_B(kthCol, k + 1, k) = verdc; }\n }\n } #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvodes/C_openmp/cvsAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j, i, y, x)", "context_chars": 100, "text": "o data array in vector u. 
*/\n\n udata = NV_DATA_OMP(u);\n\n /* Load initial profile into u vector */\nfor (j = 1; j <= MY; j++)\n {\n y = j * dy;\n for (i = 1; i <= MX; i++)\n {\n x = i * dx;\n IJth(udata, i, j) = x * (XMAX - x) * y * (YMAX - y) * exp(FIVE * x * y);\n }\n } #pragma omp parallel for default(shared) private(j, i, y, x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/kinsol/C_openmp/kinFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "y[j] = csave;\n\n /* Load the j-th column of difference quotients */\n Pxycol = Pxy[j];\n\nfor (i = 0; i < NUM_SPECIES; i++)\n {\n Pxycol[i] = (perturb_rates[i] - ratesxy[i]) * fac;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/kinsol/C_openmp/kinFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) default( \\", "context_chars": 100, "text": "*vxy;\n sunindextype *piv, jx, jy;\n UserData data;\n\n jx = jy = 0;\n\n data = (UserData)user_data;\n\nshared) private(jx, jy, Pxy, piv, vxy) schedule(static)\n for (jx = 0; jx < MX; jx++)\n {\n for (jy = 0; jy < MY; jy++)\n {\n /* For each (jx,jy), solve a linear system of size NUM_SPECIES.\n vxy is the address of the corresponding portion of the vector vv;\n Pxy is the address of the corresponding block of the matrix P;\n piv is the address of the corresponding block of the array pivot. */\n vxy = IJ_Vptr(vv, jx, jy);\n Pxy = (data->P)[jx][jy];\n piv = (data->pivot)[jx][jy];\n SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, piv, vxy);\n\n } /* end of jy loop */\n\n } #pragma omp parallel for collapse(2) default( \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/kinsol/C_openmp/kinFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "i++)\n {\n ratesxy[i] = DotProd(NUM_SPECIES, cxy, acoef[i]);\n }\n\n fac = ONE + ALPHA * xx * yy;\n\nfor (i = 0; i < NUM_SPECIES; i++)\n {\n ratesxy[i] = cxy[i] * (bcoef[i] * fac + ratesxy[i]);\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/arkode/C_openmp/ark_heat1D_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) schedule(static) \\", "context_chars": 100, "text": "= -SUN_RCONST(2.0) * k / dx / dx;\n isource = N / 2;\n Ydot[0] = 0.0; /* left boundary condition */\nnum_threads(udata->nthreads)\n for (i = 1; i < N - 1; i++)\n {\n Ydot[i] = c1 * Y[i - 1] + c2 * Y[i] + c1 * Y[i + 1];\n } #pragma omp parallel for default(shared) private(i) schedule(static) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/arkode/C_openmp/ark_heat1D_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) schedule(static) \\", "context_chars": 100, "text": "-vector products */\n c1 = k / dx / dx;\n c2 = -SUN_RCONST(2.0) * k / dx / dx;\n JV[0] = 0.0;\nnum_threads(udata->nthreads)\n for (i = 1; i < N - 1; i++)\n {\n JV[i] = c1 * V[i - 1] + c2 * V[i] + c1 * V[i + 1];\n } #pragma omp parallel for default(shared) private(i) schedule(static) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/arkode/C_openmp/ark_brusselator1D_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i, u, 
ul, ur, v, vl, vr, w, \\", "context_chars": 100, "text": "mputing all equations */\n uconst = du / dx / dx;\n vconst = dv / dx / dx;\n wconst = dw / dx / dx;\nwl, wr) schedule(static) \\\n num_threads(udata->nthreads)\n for (i = 1; i < N - 1; i++)\n {\n /* set shortcuts */\n u = Ydata[IDX(i, 0)];\n ul = Ydata[IDX(i - 1, 0)];\n ur = Ydata[IDX(i + 1, 0)];\n v = Ydata[IDX(i, 1)];\n vl = Ydata[IDX(i - 1, 1)];\n vr = Ydata[IDX(i + 1, 1)];\n w = Ydata[IDX(i, 2)];\n wl = Ydata[IDX(i - 1, 2)];\n wr = Ydata[IDX(i + 1, 2)];\n\n /* u_t = du*u_xx + a - (w+1)*u + v*u^2 */\n dYdata[IDX(i, 0)] = (ul - SUN_RCONST(2.0) * u + ur) * uconst + a -\n (w + SUN_RCONST(1.0)) * u + v * u * u;\n\n /* v_t = dv*v_xx + w*u - v*u^2 */\n dYdata[IDX(i, 1)] = (vl - SUN_RCONST(2.0) * v + vr) * vconst + w * u -\n v * u * u;\n\n /* w_t = dw*w_xx + (b-w)/ep - w*u */\n dYdata[IDX(i, 2)] = (wl - SUN_RCONST(2.0) * w + wr) * wconst +\n (b - w) / ep - w * u;\n } #pragma omp parallel for default(shared) private(i, u, ul, ur, v, vl, vr, w, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/arkode/C_openmp/ark_brusselator1D_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) schedule(static) \\", "context_chars": 100, "text": "type wconst = c * udata->dw / dx / dx;\n\n /* iterate over intervals, filling in Jacobian entries */\nnum_threads(udata->nthreads)\n for (i = 1; i < N - 1; i++)\n {\n /* Jacobian of (L*y) at this node */\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i - 1, 0)) += uconst;\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i - 1, 1)) += vconst;\n SM_ELEMENT_B(Jac, IDX(i, 2), IDX(i - 1, 2)) += wconst;\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i, 0)) -= SUN_RCONST(2.0) * uconst;\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i, 1)) -= SUN_RCONST(2.0) * vconst;\n SM_ELEMENT_B(Jac, IDX(i, 2), IDX(i, 2)) -= SUN_RCONST(2.0) * wconst;\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i + 1, 0)) += uconst;\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i + 1, 1)) += vconst;\n SM_ELEMENT_B(Jac, IDX(i, 2), IDX(i + 1, 2)) += wconst;\n } #pragma omp parallel for default(shared) private(i) schedule(static) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/arkode/C_openmp/ark_brusselator1D_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i, u, v, w) schedule(static) \\", "context_chars": 100, "text": "a, \"N_VGetArrayPointer\", 0)) { return 1; }\n\n /* iterate over nodes, filling in Jacobian entries */\nnum_threads(udata->nthreads)\n for (i = 1; i < N - 1; i++)\n {\n /* set nodal value shortcuts (shifted index due to start at first interior node) */\n u = Ydata[IDX(i, 0)];\n v = Ydata[IDX(i, 1)];\n w = Ydata[IDX(i, 2)];\n\n /* all vars wrt u */\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i, 0)) +=\n c * (SUN_RCONST(2.0) * u * v - (w + SUN_RCONST(1.0)));\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i, 0)) += c * (w - SUN_RCONST(2.0) * u * v);\n SM_ELEMENT_B(Jac, IDX(i, 2), IDX(i, 0)) += c * (-w);\n\n /* all vars wrt v */\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i, 1)) += c * (u * u);\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i, 1)) += c * (-u * u);\n\n /* all vars wrt w */\n SM_ELEMENT_B(Jac, IDX(i, 0), IDX(i, 2)) += c * (-u);\n SM_ELEMENT_B(Jac, IDX(i, 1), IDX(i, 2)) += c * (u);\n SM_ELEMENT_B(Jac, IDX(i, 2), IDX(i, 2)) += c * (-SUN_RCONST(1.0) / ep - u);\n } #pragma omp parallel for default(shared) private(i, u, v, w) schedule(static) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/ida/C_openmp/idaFoodWeb_kry_omp.c", 
"omp_pragma_line": "#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) \\", "context_chars": 100, "text": "idual values appropriately\n for differential or algebraic components. */\nschedule(static) num_threads(webdata->nthreads)\n for (jy = 0; jy < MY; jy++)\n {\n yloc = NSMX * jy;\n for (jx = 0; jx < MX; jx++)\n {\n loc = yloc + NUM_SPECIES * jx;\n for (is = 0; is < NUM_SPECIES; is++)\n {\n if (is < np) { resv[loc + is] = cpv[loc + is] - resv[loc + is]; }\n else { resv[loc + is] = -resv[loc + is]; }\n }\n }\n } #pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/ida/C_openmp/idaFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, \\", "context_chars": 100, "text": " UserData webdata;\n\n jx = jy = 0;\n\n webdata = (UserData)user_data;\n\n N_VScale(ONE, rvec, zvec);\n\npivot) \\\n schedule(static) num_threads(webdata->nthreads)\n for (jx = 0; jx < MX; jx++)\n {\n for (jy = 0; jy < MY; jy++)\n {\n zxy = IJ_Vptr(zvec, jx, jy);\n Pxy = (webdata->PP)[jx][jy];\n pivot = (webdata->pivot)[jx][jy];\n SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);\n }\n } #pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/ida/C_openmp/idaFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\", "context_chars": 100, "text": "x, yy, cxy, ratesxy, webdata);\n\n /* Loop over species, do differencing, load crate segment. */\nschedule(static) num_threads(webdata->nthreads)\n for (is = 0; is < NUM_SPECIES; is++)\n {\n /* Differencing in y. */\n dcyli = *(cxy + is) - *(cxy - idyl + is);\n dcyui = *(cxy + idyu + is) - *(cxy + is);\n\n /* Differencing in x. */\n dcxli = *(cxy + is) - *(cxy - idxl + is);\n dcxui = *(cxy + idxu + is) - *(cxy + is);\n\n /* Compute the crate values at (xx,yy). */\n cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) +\n ratesxy[is];\n\n } #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/ida/C_openmp/idaFoodWeb_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) \\", "context_chars": 100, "text": "idual values appropriately\n for differential or algebraic components. */\nschedule(static) num_threads(webdata->nthreads)\n for (jy = 0; jy < MY; jy++)\n {\n yloc = NSMX * jy;\n for (jx = 0; jx < MX; jx++)\n {\n loc = yloc + NUM_SPECIES * jx;\n for (is = 0; is < NUM_SPECIES; is++)\n {\n if (is < np) { resv[loc + is] = cpv[loc + is] - resv[loc + is]; }\n else { resv[loc + is] = -resv[loc + is]; }\n }\n }\n } #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/ida/C_openmp/idaFoodWeb_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\", "context_chars": 100, "text": "x, yy, cxy, ratesxy, webdata);\n\n /* Loop over species, do differencing, load crate segment. */\nschedule(static) num_threads(webdata->nthreads)\n for (is = 0; is < NUM_SPECIES; is++)\n {\n /* Differencing in y. 
*/\n dcyli = *(cxy + is) - *(cxy - idyl + is);\n dcyui = *(cxy + idyu + is) - *(cxy + is);\n\n /* Differencing in x. */\n dcxli = *(cxy + is) - *(cxy - idxl + is);\n dcxui = *(cxy + idxu + is) - *(cxy + is);\n\n /* Compute the crate values at (xx,yy). */\n cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) +\n ratesxy[is];\n\n } #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvode/C_openmp/cvAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, \\", "context_chars": 100, "text": "= data->hdcoef;\n horac = data->hacoef;\n verdc = data->vdcoef;\n\n /* Loop over all grid points. */\nurt, hdiff, hadv, vdiff) \\\n num_threads(data->nthreads)\n for (j = 1; j <= MY; j++)\n {\n for (i = 1; i <= MX; i++)\n {\n /* Extract u at x_i, y_j and four neighboring points */\n\n uij = IJth(udata, i, j);\n udn = (j == 1) ? ZERO : IJth(udata, i, j - 1);\n uup = (j == MY) ? ZERO : IJth(udata, i, j + 1);\n ult = (i == 1) ? ZERO : IJth(udata, i - 1, j);\n urt = (i == MX) ? ZERO : IJth(udata, i + 1, j);\n\n /* Set diffusion and advection terms and load into udot */\n\n hdiff = hordc * (ult - TWO * uij + urt);\n hadv = horac * (urt - ult);\n vdiff = verdc * (uup - TWO * uij + udn);\n IJth(dudata, i, j) = hdiff + hadv + vdiff;\n }\n } #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvode/C_openmp/cvAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) \\", "context_chars": 100, "text": "ta = (UserData)user_data;\n hordc = data->hdcoef;\n horac = data->hacoef;\n verdc = data->vdcoef;\n\nnum_threads(data->nthreads)\n for (j = 1; j <= MY; j++)\n {\n for (i = 1; i <= MX; i++)\n {\n k = j - 1 + (i - 1) * MY;\n kthCol = SUNBandMatrix_Column(J, k);\n\n /* set the kth column of J */\n\n SM_COLUMN_ELEMENT_B(kthCol, k, k) = -TWO * (verdc + hordc);\n if (i != 1) { SM_COLUMN_ELEMENT_B(kthCol, k - MY, k) = hordc + horac; }\n if (i != MX) { SM_COLUMN_ELEMENT_B(kthCol, k + MY, k) = hordc - horac; }\n if (j != 1) { SM_COLUMN_ELEMENT_B(kthCol, k - 1, k) = verdc; }\n if (j != MY) { SM_COLUMN_ELEMENT_B(kthCol, k + 1, k) = verdc; }\n }\n } #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/cvode/C_openmp/cvAdvDiff_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j, i, y, x)", "context_chars": 100, "text": "o data array in vector u. */\n\n udata = NV_DATA_OMP(u);\n\n /* Load initial profile into u vector */\nfor (j = 1; j <= MY; j++)\n {\n y = j * dy;\n for (i = 1; i <= MX; i++)\n {\n x = i * dx;\n IJth(udata, i, j) = x * (XMAX - x) * y * (YMAX - y) * exp(FIVE * x * y);\n }\n } #pragma omp parallel for default(shared) private(j, i, y, x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/idas/C_openmp/idasFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) \\", "context_chars": 100, "text": "idual values appropriately\n for differential or algebraic components. 
*/\nschedule(static) num_threads(webdata->nthreads)\n for (jy = 0; jy < MY; jy++)\n {\n yloc = NSMX * jy;\n for (jx = 0; jx < MX; jx++)\n {\n loc = yloc + NUM_SPECIES * jx;\n for (is = 0; is < NUM_SPECIES; is++)\n {\n if (is < np) { resv[loc + is] = cpv[loc + is] - resv[loc + is]; }\n else { resv[loc + is] = -resv[loc + is]; }\n }\n }\n } #pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/idas/C_openmp/idasFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, \\", "context_chars": 100, "text": " UserData webdata;\n\n jx = jy = 0;\n\n webdata = (UserData)user_data;\n\n N_VScale(ONE, rvec, zvec);\n\npivot) \\\n schedule(static) num_threads(webdata->nthreads)\n for (jx = 0; jx < MX; jx++)\n {\n for (jy = 0; jy < MY; jy++)\n {\n zxy = IJ_Vptr(zvec, jx, jy);\n Pxy = (webdata->PP)[jx][jy];\n pivot = (webdata->pivot)[jx][jy];\n SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);\n }\n } #pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/idas/C_openmp/idasFoodWeb_kry_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\", "context_chars": 100, "text": "x, yy, cxy, ratesxy, webdata);\n\n /* Loop over species, do differencing, load crate segment. */\nschedule(static) num_threads(webdata->nthreads)\n for (is = 0; is < NUM_SPECIES; is++)\n {\n /* Differencing in y. */\n dcyli = *(cxy + is) - *(cxy - idyl + is);\n dcyui = *(cxy + idyu + is) - *(cxy + is);\n\n /* Differencing in x. */\n dcxli = *(cxy + is) - *(cxy - idxl + is);\n dcxui = *(cxy + idxu + is) - *(cxy + is);\n\n /* Compute the crate values at (xx,yy). */\n cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) +\n ratesxy[is];\n\n } #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/idas/C_openmp/idasFoodWeb_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) \\", "context_chars": 100, "text": "idual values appropriately\n for differential or algebraic components. */\nschedule(static) num_threads(webdata->nthreads)\n for (jy = 0; jy < MY; jy++)\n {\n yloc = NSMX * jy;\n for (jx = 0; jx < MX; jx++)\n {\n loc = yloc + NUM_SPECIES * jx;\n for (is = 0; is < NUM_SPECIES; is++)\n {\n if (is < np) { resv[loc + is] = cpv[loc + is] - resv[loc + is]; }\n else { resv[loc + is] = -resv[loc + is]; }\n }\n }\n } #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/examples/idas/C_openmp/idasFoodWeb_bnd_omp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\", "context_chars": 100, "text": "x, yy, cxy, ratesxy, webdata);\n\n /* Loop over species, do differencing, load crate segment. */\nschedule(static) num_threads(webdata->nthreads)\n for (is = 0; is < NUM_SPECIES; is++)\n {\n /* Differencing in y. */\n dcyli = *(cxy + is) - *(cxy - idyl + is);\n dcyui = *(cxy + idyu + is) - *(cxy + is);\n\n /* Differencing in x. 
*/\n dcxli = *(cxy + is) - *(cxy - idxl + is);\n dcxui = *(cxy + idxu + is) - *(cxy + is);\n\n /* Compute the crate values at (xx,yy). */\n cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) +\n ratesxy[is];\n\n } #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, a, b, xd, yd, zd) \\", "context_chars": 100, "text": " */\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = (a * xd[i]) + (b * yd[i]); } #pragma omp parallel for default(none) private(i) shared(N, a, b, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, c, zd) \\", "context_chars": 100, "text": "tialize to suppress clang warning */\n zd = NULL;\n\n N = NV_LENGTH_OMP(z);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(z))\n for (i = 0; i < N; i++) { zd[i] = c; } #pragma omp parallel for default(none) private(i) shared(N, c, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i] * yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i] / yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, c, xd, zd) \\", "context_chars": 100, "text": "P(x, z); }\n else\n {\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = c * xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, c, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x))", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nfor (i = 0; i < N; i++) { zd[i] = SUNRabs(xd[i]); } #pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x))"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = ONE / xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, b, xd, zd) \\", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i] + b; } #pragma omp parallel for default(none) private(i) shared(N, b, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\", "context_chars": 100, "text": " = ZERO;\n xd = yd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n\nreduction(+ : sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { sum += xd[i] * yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, wd) \\", "context_chars": 100, "text": " = ZERO;\n xd = wd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n wd = NV_DATA_OMP(w);\n\nreduction(+ : sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { sum += SUNSQR(xd[i] * wd[i]); } #pragma omp parallel for default(none) private(i) shared(N, xd, wd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd) \\", "context_chars": 100, "text": "ress clang warning */\n sum = ZERO;\n xd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n\nreduction(+ : sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { sum += SUNRabs(xd[i]); } #pragma omp parallel for default(none) private(i) shared(N, xd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, c, xd, zd) \\", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = (SUNRabs(xd[i]) >= c) ? 
ONE : ZERO; } #pragma omp parallel for default(none) private(i) shared(N, c, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, val, xd, zd) \\", "context_chars": 100, "text": " zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\n val = ZERO;\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++)\n {\n if (xd[i] == ZERO) { val = ONE; }\n else { zd[i] = ONE / xd[i]; }\n } #pragma omp parallel for default(none) private(i) shared(N, val, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i, test) \\", "context_chars": 100, "text": "ENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n cd = NV_DATA_OMP(c);\n md = NV_DATA_OMP(m);\n\n temp = ZERO;\n\nshared(N, xd, cd, md, temp) schedule(static) \\\n num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++)\n {\n md[i] = ZERO;\n\n /* Continue if no constraints were set for the variable */\n if (cd[i] == ZERO) { continue; }\n\n /* Check if a set constraint has been violated */\n test = (SUNRabs(cd[i]) > ONEPT5 && xd[i] * cd[i] <= ZERO) ||\n (SUNRabs(cd[i]) > HALF && xd[i] * cd[i] < ZERO);\n if (test) { temp = md[i] = ONE; /* Here is a race to write to temp */ }\n } #pragma omp parallel for default(none) private(i, test) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, wd) \\", "context_chars": 100, "text": " = ZERO;\n xd = wd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n wd = NV_DATA_OMP(w);\n\nreduction(+ : sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { sum += SUNSQR(xd[i] * wd[i]); } #pragma omp parallel for default(none) private(i) shared(N, xd, wd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, wd, idd) \\", "context_chars": 100, "text": " N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n wd = NV_DATA_OMP(w);\n idd = NV_DATA_OMP(id);\n\nreduction(+ : sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++)\n {\n if (idd[i] > ZERO) { sum += SUNSQR(xd[i] * wd[i]); }\n } #pragma omp parallel for default(none) private(i) shared(N, xd, wd, idd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd 
= NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i] + yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = xd[i] - yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\", "context_chars": 100, "text": "rning */\n xd = zd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = -xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, c, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = c * (xd[i] + yd[i]); } #pragma omp parallel for default(none) private(i) shared(N, c, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, c, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = c * (xd[i] - yd[i]); } #pragma omp parallel for default(none) private(i) shared(N, c, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, a, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = (a * xd[i]) + yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, a, xd, yd, zd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, a, xd, yd, zd) \\", "context_chars": 100, "text": "LL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n zd = NV_DATA_OMP(z);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { zd[i] = (a * xd[i]) - yd[i]; } #pragma omp parallel for default(none) private(i) shared(N, a, xd, yd, zd) \\"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\", "context_chars": 100, "text": " NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n yd = NV_DATA_OMP(y);\n\n if (a == ONE)\n {\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { yd[i] += xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\", "context_chars": 100, "text": "READS_OMP(x))\n for (i = 0; i < N; i++) { yd[i] += xd[i]; }\n return;\n }\n\n if (a == -ONE)\n {\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { yd[i] -= xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, xd, yd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, a, xd, yd) \\", "context_chars": 100, "text": "num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { yd[i] -= xd[i]; }\n return;\n }\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { yd[i] += a * xd[i]; } #pragma omp parallel for default(none) private(i) shared(N, a, xd, yd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmp/nvector_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) private(i) shared(N, a, xd) \\", "context_chars": 100, "text": "tialize to suppress clang warning */\n xd = NULL;\n\n N = NV_LENGTH_OMP(x);\n xd = NV_DATA_OMP(x);\n\nschedule(static) num_threads(NV_NUM_THREADS_OMP(x))\n for (i = 0; i < N; i++) { xd[i] *= a; } #pragma omp parallel for default(none) private(i) shared(N, a, xd) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "mp teams distribute\n {\n for (i = 1; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\nfor (j = 0; j < N; j++)\n {\n to_add = c[i] * xd_dev[j];\n#pragma omp atomic\n zd_dev[j] += to_add;\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "mp teams distribute\n {\n for (i = 1; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\nfor (j = 0; j < N; j++)\n {\n to_add = c[i] * xd_dev[j];\n#pragma omp atomic\n zd_dev[j] += to_add;\n } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "pragma omp teams distribute\n {\n for (i = 1; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\nfor (j = 0; j < N; j++)\n {\n to_add = c[i] * xd_dev[j];\n#pragma omp atomic\n zd_dev[j] += to_add;\n } #pragma omp parallel for schedule(static, 1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "mp teams distribute\n {\n for (i = 0; i < nvec; i++)\n {\n yd_dev = yd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { yd_dev[j] += a[i] * xd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n for (i = 0; i < nvec; i++)\n {\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : sum) schedule(static, 1)", "context_chars": 100, "text": "p teams distribute\n for (i = 0; i < nvec; i++)\n {\n yd_dev = yd_dev_ptrs[i];\n sum = ZERO;\nfor (j = 0; j < N; j++) { sum += xd_dev[j] * yd_dev[j]; } #pragma omp parallel for reduction(+ : sum) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = a * xd_dev[j] + b * yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "mp teams distribute\n {\n for (i = 0; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { xd_dev[j] *= c[i]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n for (i = 0; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = c[i] * xd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "pragma omp teams distribute\n {\n for (i = 0; i < nvec; i++)\n {\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = c; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : sum) schedule(static, 1)", "context_chars": 100, "text": "nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n wd_dev = wd_dev_ptrs[i];\n sum = ZERO;\n{\n for (j = 0; j < N; j++) { sum += SUNSQR(xd_dev[j] * wd_dev[j]); }\n } #pragma omp parallel for reduction(+ : sum) schedule(static, 1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for reduction(+ : sum) schedule(static, 1)", "context_chars": 100, "text": "nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n wd_dev = wd_dev_ptrs[i];\n sum = ZERO;\n{\n for (j = 0; j < N; j++)\n {\n if (idd_dev[j] > ZERO) { sum += SUNSQR(xd_dev[j] * wd_dev[j]); }\n }\n } #pragma omp parallel for reduction(+ : sum) schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "ptrs[i];\n for (j = 0; j < nsum; j++)\n {\n yd_dev = yd_dev_ptrs[i * nsum + j];\nfor (k = 0; k < N; k++) { yd_dev[k] += a[j] * xd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "++)\n {\n yd_dev = yd_dev_ptrs[i * nsum + j];\n zd_dev = zd_dev_ptrs[i * nsum + j];\nfor (k = 0; k < N; k++) { zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "ptrs[j];\n for (i = 1; i < nsum; i++)\n {\n xd_dev = xd_dev_ptrs[j * nsum + i];\nfor (k = 0; k < N; k++) { zd_dev[k] += c[i] * xd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "mp teams distribute\n {\n for (j = 0; j < nvec; j++)\n {\n zd_dev = zd_dev_ptrs[j];\nfor (k = 0; k < N; k++) { zd_dev[k] *= c[0]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "c[0]; }\n\n for (i = 1; i < nsum; i++)\n {\n xd_dev = xd_dev_ptrs[j * nsum + i];\nfor (k = 0; k < N; k++) { zd_dev[k] += c[i] * xd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": " sum into the output vector */\n xd_dev = xd_dev_ptrs[j * nsum];\n zd_dev = zd_dev_ptrs[j];\nfor (k = 0; k < N; k++) { zd_dev[k] = c[0] * xd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "tput vector */\n for (i = 1; i < nsum; i++)\n {\n xd_dev = xd_dev_ptrs[j * nsum + i];\nfor (k = 0; k < N; k++) { zd_dev[k] += c[i] * xd_dev[k]; } #pragma omp parallel for schedule(static, 1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = xd_dev[j] + yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = xd_dev[j] - yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = c * (xd_dev[j] + yd_dev[j]); } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = c * (xd_dev[j] - yd_dev[j]); } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = (a * xd_dev[j]) + yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\n zd_dev = zd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { zd_dev[j] = (a * xd_dev[j]) - yd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "or (i = 0; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { yd_dev[j] += xd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 1)", "context_chars": 100, "text": "or (i = 0; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { yd_dev[j] -= xd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/sundials/src/nvector/openmpdev/nvector_openmpdev.c", "omp_pragma_line": "#pragma 
omp parallel for schedule(static, 1)", "context_chars": 100, "text": "\n for (i = 0; i < nvec; i++)\n {\n xd_dev = xd_dev_ptrs[i];\n yd_dev = yd_dev_ptrs[i];\nfor (j = 0; j < N; j++) { yd_dev[j] += a * xd_dev[j]; } #pragma omp parallel for schedule(static, 1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "atrix);\n \n indx_offd = hypre_AuxParCSRMatrixIndxOffd(aux_matrix);\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < local_num_rows; i++)\n {\n\t indx_diag[i] = diag_i[i];\n\t indx_offd[i] = offd_i[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "HYPRE_NO_GLOBAL_PARTITION\n pstart = 0;\n#else\n pstart = my_id;\n#endif\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < nrows; i++)\n {\n row_index = rows[i];\n if (row_index >= row_partitioning[pstart] && \n row_index < row_partitioning[pstart+1])\n {\n /* compute local row number */\n row_index -= row_partitioning[pstart]; \n ncols[i] = diag_i[row_index+1]-diag_i[row_index]+offd_i[row_index+1]\n -offd_i[row_index];\n }\n else\n {\n ncols[i] = 0;\n\t if (print_level)\n hypre_printf (\"Warning! Row %d is not on Proc. %d!\\n\",\n row_index, my_id);\n }\n } #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,j0,temp)", "context_chars": 100, "text": "= hypre_CSRMatrixJ(diag);\n diag_data = hypre_CSRMatrixData(diag);\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_rows; i++)\n {\n \t j0 = diag_i[i];\n \t for (j=j0; j < diag_i[i+1]; j++)\n \t {\n \t diag_j[j] -= col_0;\n \t if (diag_j[j] == i)\n \t {\n \t temp = diag_data[j0];\n \t diag_data[j0] = diag_data[j];\n \t diag_data[j] = temp;\n \t diag_j[j] = diag_j[j0];\n \t diag_j[j0] = i;\n \t }\n \t }\n } #pragma omp parallel for private (i,j,j0,temp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJMatrix_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "or (i=0; i < num_cols_offd; i++)\n \t col_map_offd[i] = aux_offd_j[i];\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < nnz_offd; i++)\n offd_j[i]=hypre_BinarySearch(col_map_offd,offd_j[i],num_cols_offd);\n\n \t if (base)\n \t {\n\t for (i=0; i < num_cols_offd; i++)\n\t col_map_offd[i] -= base;\n\t } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "eturn hypre_error_flag;\n }\n\n data = hypre_VectorData( local_vector );\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < vec_stop - vec_start; i++)\n data[i] = 0.;\n \n return hypre_error_flag;\n}\n\n/******************************************************************************\n *\n * hypre_IJVectorSetValuesPar\n *\n * sets a potentially noncontiguous set of components of an IJVectorPar\n *\n *****************************************************************************/\n\nHYPRE_Int\nhypre_IJVectorSetValuesPar(hypre_IJVector *vector,\n HYPRE_Int 
num_values,\n const HYPRE_Int *indices,\n const HYPRE_Complex *values)\n{\n HYPRE_Int my_id;\n HYPRE_Int i, j, vec_start, vec_stop;\n HYPRE_Complex *data;\n HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);\n\n HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);\n hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);\n hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);\n MPI_Comm comm = hypre_IJVectorComm(vector);\n hypre_Vector *local_vector;\n\n /* If no components are to be set, perform no checking and return */\n if (num_values < 1) return 0;\n\n hypre_MPI_Comm_rank(comm, &my_id);\n\n /* If par_vector == NULL or partitioning == NULL or local_vector == NULL \n let user know of catastrophe and exit */\n\n if (!par_vector)\n {\n if (print_level)\n {\n hypre_printf(\"par_vector == NULL -- \");\n hypre_printf(\"hypre_IJVectorSetValuesPar\\n\");\n hypre_printf(\"**** Vector storage is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n local_vector = hypre_ParVectorLocalVector(par_vector);\n if (!IJpartitioning)\n {\n if (print_level)\n {\n hypre_printf(\"IJpartitioning == NULL -- \");\n hypre_printf(\"hypre_IJVectorSetValuesPar\\n\");\n hypre_printf(\"**** IJVector partitioning is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n if (!local_vector)\n {\n if (print_level)\n {\n hypre_printf(\"local_vector == NULL -- \");\n hypre_printf(\"hypre_IJVectorSetValuesPar\\n\");\n hypre_printf(\"**** Vector local data is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_NO_GLOBAL_PARTITION\n vec_start = IJpartitioning[0];\n vec_stop = IJpartitioning[1]-1;\n#else\n vec_start = IJpartitioning[my_id];\n vec_stop = IJpartitioning[my_id+1]-1;\n\n\n if (vec_start > vec_stop) \n {\n if (print_level)\n {\n hypre_printf(\"vec_start > vec_stop -- \");\n hypre_printf(\"hypre_IJVectorSetValuesPar\\n\");\n hypre_printf(\"**** This vector partitioning should not occur ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n\n /* Determine whether indices points to local indices only, and if not, store\n indices and values in auxiliary vector structure. If indices == NULL,\n assume that num_values components are to be set in a block starting at\n vec_start. NOTE: If indices == NULL off proc values are ignored!!! */\n\n data = hypre_VectorData(local_vector);\n\n if (indices)\n {\n HYPRE_Int current_num_elmts\n = hypre_AuxParVectorCurrentNumElmts(aux_vector);\n HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);\n HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);\n HYPRE_Int ii;\n\n for (j = 0; j < num_values; j++)\n {\n\t i = indices[j];\n\t if (i < vec_start || i > vec_stop)\n {\n for (ii = 0; ii < current_num_elmts; ii++)\n\t {\n\t if (i == off_proc_i[ii])\n\t {\n\t\t off_proc_i[ii] = -1;\n\t\t cancel_indx++;\n }\n }\n hypre_AuxParVectorCancelIndx(aux_vector) = cancel_indx;\n }\n else /* local values are inserted into the vector */\n {\n i -= vec_start;\n data[i] = values[j];\n }\n } \n }\n else \n {\n if (num_values > vec_stop - vec_start + 1)\n {\n if (print_level)\n {\n hypre_printf(\"Warning! 
Indices beyond local range not identified!\\n \");\n hypre_printf(\"Off processor values have been ignored!\\n\");\n }\n\t num_values = vec_stop - vec_start +1;\n }\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_values; j++)\n data[j] = values[j];\n } \n \n return hypre_error_flag;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " ignored!\\n\");\n }\n\t num_values = vec_stop - vec_start +1;\n }\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_values; j++)\n data[j] = values[j];\n } \n \n return hypre_error_flag;\n}\n\n/******************************************************************************\n *\n * hypre_IJVectorAddToValuesPar\n *\n * adds to a potentially noncontiguous set of IJVectorPar components\n *\n *****************************************************************************/\n\nHYPRE_Int\nhypre_IJVectorAddToValuesPar(hypre_IJVector *vector,\n HYPRE_Int num_values,\n const HYPRE_Int *indices,\n const HYPRE_Complex *values)\n{\n HYPRE_Int my_id;\n HYPRE_Int i, j, vec_start, vec_stop;\n HYPRE_Complex *data;\n HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);\n\n HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);\n hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);\n hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);\n MPI_Comm comm = hypre_IJVectorComm(vector);\n hypre_Vector *local_vector;\n\n /* If no components are to be retrieved, perform no checking and return */\n if (num_values < 1) return 0;\n\n hypre_MPI_Comm_rank(comm, &my_id);\n\n /* If par_vector == NULL or partitioning == NULL or local_vector == NULL \n let user know of catastrophe and exit */\n\n if (!par_vector)\n {\n if (print_level)\n {\n hypre_printf(\"par_vector == NULL -- \");\n hypre_printf(\"hypre_IJVectorAddToValuesPar\\n\");\n hypre_printf(\"**** Vector storage is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n local_vector = hypre_ParVectorLocalVector(par_vector);\n if (!IJpartitioning)\n {\n if (print_level)\n {\n hypre_printf(\"IJpartitioning == NULL -- \");\n hypre_printf(\"hypre_IJVectorAddToValuesPar\\n\");\n hypre_printf(\"**** IJVector partitioning is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n if (!local_vector)\n {\n if (print_level)\n {\n hypre_printf(\"local_vector == NULL -- \");\n hypre_printf(\"hypre_IJVectorAddToValuesPar\\n\");\n hypre_printf(\"**** Vector local data is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n\n#ifdef HYPRE_NO_GLOBAL_PARTITION\n vec_start = IJpartitioning[0];\n vec_stop = IJpartitioning[1]-1;\n#else\n vec_start = IJpartitioning[my_id];\n vec_stop = IJpartitioning[my_id+1]-1;\n\n\n if (vec_start > vec_stop) \n {\n if (print_level)\n {\n hypre_printf(\"vec_start > vec_stop -- \");\n hypre_printf(\"hypre_IJVectorAddToValuesPar\\n\");\n hypre_printf(\"**** This vector partitioning should not occur ****\\n\");\n }\n hypre_error_in_arg(1);\n return hypre_error_flag;\n }\n\n data = hypre_VectorData(local_vector);\n\n if (indices)\n {\n HYPRE_Int current_num_elmts\n = hypre_AuxParVectorCurrentNumElmts(aux_vector);\n HYPRE_Int 
max_off_proc_elmts\n = hypre_AuxParVectorMaxOffProcElmts(aux_vector);\n HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);\n HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);\n\n for (j = 0; j < num_values; j++)\n {\n\t i = indices[j];\n\t if (i < vec_start || i > vec_stop)\n {\n /* if elements outside processor boundaries, store in off processor\n stash */\n\t if (!max_off_proc_elmts)\n {\n max_off_proc_elmts = 100;\n hypre_AuxParVectorMaxOffProcElmts(aux_vector) =\n max_off_proc_elmts;\n hypre_AuxParVectorOffProcI(aux_vector)\n = hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);\n hypre_AuxParVectorOffProcData(aux_vector)\n = hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);\n off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);\n off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);\n }\n else if (current_num_elmts + 1 > max_off_proc_elmts)\n {\n max_off_proc_elmts += 10;\n off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,max_off_proc_elmts);\n off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,\n max_off_proc_elmts);\n hypre_AuxParVectorMaxOffProcElmts(aux_vector)\n = max_off_proc_elmts;\n hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;\n hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;\n }\n off_proc_i[current_num_elmts] = i;\n off_proc_data[current_num_elmts++] = values[j];\n hypre_AuxParVectorCurrentNumElmts(aux_vector)=current_num_elmts;\n }\n else /* local values are added to the vector */\n {\n i -= vec_start;\n data[i] += values[j];\n }\n } \n }\n else \n {\n if (num_values > vec_stop - vec_start + 1)\n {\n if (print_level)\n {\n hypre_printf(\"Warning! Indices beyond local range not identified!\\n \");\n hypre_printf(\"Off processor values have been ignored!\\n\");\n }\n\t num_values = vec_stop - vec_start +1;\n }\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_values; j++)\n data[j] += values[j];\n } \n \n return hypre_error_flag;\n} #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " ignored!\\n\");\n }\n\t num_values = vec_stop - vec_start +1;\n }\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_values; j++)\n data[j] += values[j];\n } \n \n return hypre_error_flag;\n}\n\n/******************************************************************************\n *\n * hypre_IJVectorAssemblePar\n *\n * currently tests existence of of ParVector object and its partitioning\n *\n *****************************************************************************/\n\nHYPRE_Int\nhypre_IJVectorAssemblePar(hypre_IJVector *vector)\n{\n HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);\n hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);\n hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);\n HYPRE_Int *partitioning;\n MPI_Comm comm = hypre_IJVectorComm(vector);\n HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);\n\n if (!par_vector)\n {\n if (print_level)\n {\n hypre_printf(\"par_vector == NULL -- \");\n hypre_printf(\"hypre_IJVectorAssemblePar\\n\");\n hypre_printf(\"**** Vector storage is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n } \n partitioning = hypre_ParVectorPartitioning(par_vector);\n if (!IJpartitioning)\n { \n if (print_level)\n {\n 
hypre_printf(\"IJpartitioning == NULL -- \");\n hypre_printf(\"hypre_IJVectorAssemblePar\\n\");\n hypre_printf(\"**** IJVector partitioning is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n }\n if (!partitioning)\n { \n if (print_level)\n {\n hypre_printf(\"partitioning == NULL -- \");\n hypre_printf(\"hypre_IJVectorAssemblePar\\n\");\n hypre_printf(\"**** ParVector partitioning is either unallocated or orphaned ****\\n\");\n }\n hypre_error_in_arg(1);\n }\n\n if (aux_vector)\n {\n HYPRE_Int off_proc_elmts, current_num_elmts;\n HYPRE_Int max_off_proc_elmts;\n HYPRE_Int *off_proc_i;\n HYPRE_Complex *off_proc_data;\n HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);\n HYPRE_Int current_i, ii;\n current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);\n if (cancel_indx)\n {\n off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);\n off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);\n current_i = 0;\n\t for (ii=0; ii < current_num_elmts; ii++) \n {\n if (off_proc_i[ii] != -1)\n\t {\n\t off_proc_i[current_i] = off_proc_i[ii];\n\t off_proc_data[current_i++] = off_proc_data[ii];\n\t }\n }\n hypre_AuxParVectorCurrentNumElmts(aux_vector) = current_i;\n current_num_elmts = current_i;\n }\n hypre_MPI_Allreduce(¤t_num_elmts,&off_proc_elmts,1,HYPRE_MPI_INT,\n hypre_MPI_SUM,comm);\n if (off_proc_elmts)\n {\n max_off_proc_elmts=hypre_AuxParVectorMaxOffProcElmts(aux_vector);\n off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);\n off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);\n hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts, \n current_num_elmts, off_proc_i, off_proc_data);\n\t hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector));\n\t hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector));\n\t hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;\n\t hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;\n }\n }\n\n return hypre_error_flag;\n} #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n \n data = hypre_VectorData(local_vector);\n\n if (indices)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_values; j++)\n {\n i = indices[j] - vec_start;\n values[j] = data[i];\n } #pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/IJ_mv/IJVector_parcsr.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n hypre_error_in_arg(2);\n return hypre_error_flag;\n }\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_values; j++)\n values[j] = data[j];\n }\n\n return hypre_error_flag;\n}\n\n/******************************************************************************\n * hypre_IJVectorAssembleOffProcValsPar\n *\n * This is for handling set and get values calls to off-proc. entries - it is\n * called from assemble. 
There is an alternate version for when the assumed\n * partition is being used.\n *****************************************************************************/\n\n#ifndef HYPRE_NO_GLOBAL_PARTITION\n\nHYPRE_Int\nhypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector, \n \t\t\t\t HYPRE_Int max_off_proc_elmts,\n \t\t\t\t HYPRE_Int current_num_elmts,\n \t\t\t\t HYPRE_Int *off_proc_i,\n \t\t\t \t HYPRE_Complex *off_proc_data)\n{\n MPI_Comm comm = hypre_IJVectorComm(vector);\n hypre_ParVector *par_vector = ( hypre_ParVector *) hypre_IJVectorObject(vector);\n hypre_MPI_Request *requests = NULL;\n hypre_MPI_Status *status = NULL;\n HYPRE_Int i, j, j2, row;\n HYPRE_Int iii, indx, ip, first_index;\n HYPRE_Int proc_id, num_procs, my_id;\n HYPRE_Int num_sends, num_sends2;\n HYPRE_Int num_recvs;\n HYPRE_Int num_requests;\n HYPRE_Int vec_start, vec_len;\n HYPRE_Int *send_procs;\n HYPRE_Int *send_i;\n HYPRE_Int *send_map_starts;\n HYPRE_Int *recv_procs;\n HYPRE_Int *recv_i;\n HYPRE_Int *recv_vec_starts;\n HYPRE_Int *info;\n HYPRE_Int *int_buffer;\n HYPRE_Int *proc_id_mem;\n HYPRE_Int *partitioning;\n HYPRE_Int *displs;\n HYPRE_Int *recv_buf;\n HYPRE_Complex *send_data;\n HYPRE_Complex *recv_data;\n HYPRE_Complex *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));\n\n hypre_MPI_Comm_size(comm,&num_procs);\n hypre_MPI_Comm_rank(comm, &my_id);\n partitioning = hypre_IJVectorPartitioning(vector);\n\n first_index = partitioning[my_id];\n\n info = hypre_CTAlloc(HYPRE_Int,num_procs); \n proc_id_mem = hypre_CTAlloc(HYPRE_Int,current_num_elmts);\n for (i=0; i < current_num_elmts; i++)\n {\n row = off_proc_i[i]; \n proc_id = hypre_FindProc(partitioning,row,num_procs);\n proc_id_mem[i] = proc_id; \n info[proc_id]++;\n }\n\n /* determine send_procs and amount of data to be sent */ \n num_sends = 0;\n for (i=0; i < num_procs; i++)\n {\n if (info[i])\n {\n num_sends++;\n }\n }\n num_sends2 = 2*num_sends;\n send_procs = hypre_CTAlloc(HYPRE_Int,num_sends);\n send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);\n int_buffer = hypre_CTAlloc(HYPRE_Int,num_sends2);\n j = 0;\n j2 = 0;\n send_map_starts[0] = 0;\n for (i=0; i < num_procs; i++)\n {\n if (info[i])\n {\n send_procs[j++] = i;\n send_map_starts[j] = send_map_starts[j-1]+info[i];\n int_buffer[j2++] = i;\n\t int_buffer[j2++] = info[i];\n }\n }\n\n hypre_MPI_Allgather(&num_sends2,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);\n\n displs = hypre_CTAlloc(HYPRE_Int, num_procs+1);\n displs[0] = 0;\n for (i=1; i < num_procs+1; i++)\n displs[i] = displs[i-1]+info[i-1];\n recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs]);\n\n hypre_MPI_Allgatherv(int_buffer,num_sends2,HYPRE_MPI_INT,recv_buf,info,displs,\n\t\t\tHYPRE_MPI_INT,comm);\n\n hypre_TFree(int_buffer);\n hypre_TFree(info);\n\n /* determine recv procs and amount of data to be received */\n num_recvs = 0;\n for (j=0; j < displs[num_procs]; j+=2)\n {\n if (recv_buf[j] == my_id)\n\t num_recvs++;\n }\n\n recv_procs = hypre_CTAlloc(HYPRE_Int,num_recvs);\n recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);\n\n j2 = 0;\n recv_vec_starts[0] = 0;\n for (i=0; i < num_procs; i++)\n {\n for (j=displs[i]; j < displs[i+1]; j+=2)\n {\n if (recv_buf[j] == my_id)\n {\n\t recv_procs[j2++] = i;\n\t recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];\n }\n if (j2 == num_recvs) break;\n }\n }\n hypre_TFree(recv_buf);\n hypre_TFree(displs);\n\n /* set up data to be sent to send procs */\n /* send_i contains for each send proc \n indices, send_data contains corresponding values */\n \n send_i = 
hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);\n send_data = hypre_CTAlloc(HYPRE_Complex,send_map_starts[num_sends]);\n recv_i = hypre_CTAlloc(HYPRE_Int,recv_vec_starts[num_recvs]);\n recv_data = hypre_CTAlloc(HYPRE_Complex,recv_vec_starts[num_recvs]);\n \n for (i=0; i < current_num_elmts; i++)\n {\n proc_id = proc_id_mem[i];\n indx = hypre_BinarySearch(send_procs,proc_id,num_sends);\n iii = send_map_starts[indx];\n send_i[iii] = off_proc_i[i]; \n send_data[iii] = off_proc_data[i];\n send_map_starts[indx]++;\n }\n\n hypre_TFree(proc_id_mem);\n\n for (i=num_sends; i > 0; i--)\n {\n send_map_starts[i] = send_map_starts[i-1];\n }\n send_map_starts[0] = 0;\n\n num_requests = num_recvs+num_sends;\n\n requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);\n status = hypre_CTAlloc(hypre_MPI_Status, num_requests);\n\n j=0; \n for (i=0; i < num_recvs; i++)\n {\n vec_start = recv_vec_starts[i];\n vec_len = recv_vec_starts[i+1] - vec_start;\n ip = recv_procs[i];\n hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_INT,\n ip, 0, comm, &requests[j++]);\n }\n\n for (i=0; i < num_sends; i++)\n {\n vec_start = send_map_starts[i];\n vec_len = send_map_starts[i+1] - vec_start;\n ip = send_procs[i];\n hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_INT,\n ip, 0, comm, &requests[j++]);\n }\n \n if (num_requests)\n {\n hypre_MPI_Waitall(num_requests, requests, status);\n }\n\n j=0;\n for (i=0; i < num_recvs; i++)\n {\n vec_start = recv_vec_starts[i];\n vec_len = recv_vec_starts[i+1] - vec_start;\n ip = recv_procs[i];\n hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,\n ip, 0, comm, &requests[j++]);\n }\n\n for (i=0; i < num_sends; i++)\n {\n vec_start = send_map_starts[i];\n vec_len = send_map_starts[i+1] - vec_start;\n ip = send_procs[i];\n hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,\n ip, 0, comm, &requests[j++]);\n }\n \n if (num_requests)\n {\n hypre_MPI_Waitall(num_requests, requests, status);\n }\n\n hypre_TFree(requests);\n hypre_TFree(status);\n hypre_TFree(send_i);\n hypre_TFree(send_data);\n hypre_TFree(send_procs);\n hypre_TFree(send_map_starts);\n hypre_TFree(recv_procs);\n\n for (i=0; i < recv_vec_starts[num_recvs]; i++)\n {\n row = recv_i[i];\n j = row - first_index;\n data[j] += recv_data[i];\n }\n\n hypre_TFree(recv_vec_starts);\n hypre_TFree(recv_i);\n hypre_TFree(recv_data);\n\n return hypre_error_flag;\n} #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_mv/par_csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ";\n size = num_rows/num_threads;\n rest = num_rows - size*num_threads;\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii=0; ii < num_threads; ii++)\n {\n HYPRE_Int ns, ne;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n count = diag_i[ns]+offd_i[ns];;\n for (i=ns; i < ne; i++)\n {\n matrix_i[i] = count;\n for (j=diag_i[i]; j < diag_i[i+1]; j++)\n {\n matrix_data[count] = diag_data[j];\n matrix_j[count++] = diag_j[j]+first_col_diag;\n }\n for (j=offd_i[i]; j < offd_i[i+1]; j++)\n {\n matrix_data[count] = offd_data[j];\n matrix_j[count++] = col_map_offd[offd_j[j]];\n }\n }\n } #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_mv/par_csr_matop.c", "omp_pragma_line": 
"#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ap(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse);\n\n HYPRE_Int i, j;\nfor (i = 0; i < num_cols_offd_A; i++)\n for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)\n B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);\n\n if (num_cols_offd_C)\n {\n hypre_UnorderedIntMapDestroy(&col_map_offd_C_inverse);\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_mv/par_csr_matop.c", "omp_pragma_line": "#pragma omp parallel for private(ii, i, j)", "context_chars": 100, "text": "t++;\n }\n map_to_B[i] = cnt;\n cnt++;\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii=0; ii < num_threads; ii++)\n {\n HYPRE_Int *A_marker = NULL;\n HYPRE_Int ns, ne, A_col, num_cols, nmax;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n nmax = hypre_max(num_rows, num_cols_offd_B);\n A_marker = hypre_CTAlloc(HYPRE_Int, nmax);\n\n for (i=0; i < num_rows; i++)\n A_marker[i] = -1;\n\n for (i=ns; i < ne; i++)\n D_tmp[i] = 1.0/d[i];\n\n num_cols = C_diag_i[ns];\n for (i=ns; i < ne; i++)\n {\n for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)\n {\n A_col = A_diag_j[j];\n if (A_marker[A_col] < C_diag_i[i]) \n\t {\n\t A_marker[A_col] = num_cols;\n\t C_diag_j[num_cols] = A_col;\n\t C_diag_data[num_cols] = A_diag_data[j];\n\t num_cols++;\n\t }\n else\n\t {\n\t C_diag_data[A_marker[A_col]] += A_diag_data[j];\n\t }\n }\n for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)\n {\n A_col = B_diag_j[j];\n if (A_marker[A_col] < C_diag_i[i]) \n {\n A_marker[A_col] = num_cols;\n\t C_diag_j[num_cols] = A_col;\n\t C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];\n\t num_cols++;\n\t }\n else\n\t {\n\t C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];\n\t }\n }\n }\n\n for (i=0; i < num_cols_offd_B; i++)\n A_marker[i] = -1;\n\n num_cols = C_offd_i[ns];\n for (i=ns; i < ne; i++)\n {\n for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)\n {\n A_col = map_to_B[A_offd_j[j]];\n if (A_marker[A_col] < B_offd_i[i]) \n\t {\n\t A_marker[A_col] = num_cols;\n\t C_offd_j[num_cols] = A_col;\n\t C_offd_data[num_cols] = A_offd_data[j];\n\t num_cols++;\n\t }\n else\n\t {\n\t C_offd_data[A_marker[A_col]] += A_offd_data[j];\n\t }\n }\n for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)\n {\n A_col = B_offd_j[j];\n if (A_marker[A_col] < B_offd_i[i]) \n {\n A_marker[A_col] = num_cols;\n\t C_offd_j[num_cols] = A_col;\n\t C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];\n\t num_cols++;\n\t }\n else\n\t {\n\t C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];\n\t }\n }\n }\n hypre_TFree(A_marker);\n\n } #pragma omp parallel for private(ii, i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_mv/par_csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\nfor (i = begin; i < end; i++)\n {\n#ifdef HYPRE_USING_PERSISTENT_COMM\n ((HYPRE_Complex *)persistent_comm_handle->send_data)[i - begin]\n#else\n x_buf_data[0][i - begin]\n\n = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": 
"#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d);\n\n HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols_offd; i++)\n col_map_offd_S[i] = col_map_offd_A[i];\n }\n\n\n /*-------------------------------------------------------------------\n * Get the dof_func data for the off-processor columns\n *-------------------------------------------------------------------*/\n\n if (!comm_pkg)\n {\n\thypre_MatvecCommPkgCreate(A);\n\n\tcomm_pkg = hypre_ParCSRMatrixCommPkg(A); \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " give S same nonzero structure as A */\n hypre_ParCSRMatrixCopy(A,S,0);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_variables; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n\n /* compute scaling factor and row sum */\n row_scale = 0.0;\n row_sum = diag;\n if (num_functions > 1)\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));\n row_sum += fabs(A_diag_data[jA]);\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));\n row_sum += fabs(A_offd_data[jA]);\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));\n row_sum += fabs(A_diag_data[jA]);\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));\n row_sum += fabs(A_offd_data[jA]);\n }\n }\n\n /* compute row entries of S */\n S_diag_j[A_diag_i[i]] = -1;\n if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))\n {\n /* make all dependencies weak */\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n S_diag_j[jA] = -1;\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n S_offd_j[jA] = -1;\n }\n }\n else\n {\n if (num_functions > 1)\n { \n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale)\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n }\n } #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "map_starts[num_sends];\n int_buf_data = hypre_TAlloc(HYPRE_Int, end);\n#ifdef HYPRE_USING_OPENMP\nfor (index = begin; index < end; index++)\n {\n int_buf_data[index - begin] = fine_to_coarse[send_map_elmts[index]] + my_first_cpt;\n } 
#pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " \n hypre_ParCSRCommHandleDestroy(comm_handle);\n\n#ifdef HYPRE_USING_OPENMP\nfor (index = begin; index < end; index++)\n {\n int_buf_data[index - begin] = CF_marker[send_map_elmts[index]];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------*/\n S_int_i[0] = 0;\n num_nonzeros = 0;\n#ifdef HYPRE_USING_OPENMP\nfor (j = begin; j < end; j++)\n {\n HYPRE_Int jrow = send_map_elmts[j];\n HYPRE_Int index = 0;\n for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++)\n {\n if (CF_marker[S_diag_j[k]] > 0) index++;\n }\n for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++)\n {\n if (CF_marker_offd[S_offd_j[k]] > 0) index++;\n }\n S_int_i[j - begin + 1] = index;\n num_nonzeros += S_int_i[j - begin + 1];\n } #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_strength.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ypre_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse);\n\nfor (i=0 ; i < S_ext_offd_size; i++)\n S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]);\n\n if (num_cols_offd_C) hypre_UnorderedIntMapDestroy(&col_map_offd_C_inverse);\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n HYPRE_Int cnt_offd, cnt_diag, cnt, value;\n S_ext_diag_size = 0;\n S_ext_offd_size = 0;\n\n for (i=0; i < num_cols_offd_S; i++)\n {\n for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)\n {\n if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt)\n S_ext_offd_size++;\n else\n S_ext_diag_size++;\n }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "G_OPENMP\n HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables+num_cols_offd);\n\nfor (i=0; i < S_offd_i[num_variables]; i++)\n {\n#pragma omp atomic\n measure_array_temp[num_variables + S_offd_j[i]]++;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ables]; i++)\n {\n#pragma omp atomic\n measure_array_temp[num_variables + S_offd_j[i]]++;\n }\n\nfor (i=0; i < num_cols_offd; i++)\n {\n measure_array[i + num_variables] = measure_array_temp[i + num_variables];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ables], buf_data);\n\n /* calculate the local part for the local nodes */\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < S_diag_i[num_variables]; 
i++)\n {\n#pragma omp atomic\n measure_array_temp[S_diag_j[i]]++;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_diag_i[num_variables]; i++)\n {\n#pragma omp atomic\n measure_array_temp[S_diag_j[i]]++;\n }\n\nfor (i=0; i < num_variables; i++)\n {\n measure_array[i] = measure_array_temp[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\t\t\t\tgraph_array_offd, graph_offd_size, \n\t\t\t\tCF_marker, CF_marker_offd);*/\n#ifdef HYPRE_USING_OPENMP\nfor (ig = 0; ig < graph_size; ig++)\n {\n i = graph_array[ig];\n if (measure_array[i] > 1)\n {\n CF_marker[i] = 1;\n }\n } #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " > 1)\n {\n CF_marker[i] = 1;\n }\n }\n#ifdef HYPRE_USING_OPENMP\nfor (ig = 0; ig < graph_offd_size; ig++)\n {\n i = graph_array_offd[ig];\n if (measure_array[i+num_variables] > 1)\n {\n CF_marker_offd[i] = 1;\n }\n } #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "nt set\n *-------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (ig = 0; ig < graph_size; ig++)\n {\n i = graph_array[ig];\n if (measure_array[i] > 1)\n {\n for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)\n {\n j = S_diag_j[jS];\n if (measure_array[j] > 1)\n {\n if (measure_array[i] > measure_array[j])\n CF_marker[j] = 0;\n else if (measure_array[j] > measure_array[i])\n CF_marker[i] = 0;\n }\n } /* for each local neighbor j of i */\n for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)\n {\n jj = S_offd_j[jS];\n j = num_variables+jj;\n if (measure_array[j] > 1)\n {\n if (measure_array[i] > measure_array[j])\n CF_marker_offd[jj] = 0;\n else if (measure_array[j] > measure_array[i])\n CF_marker[i] = 0;\n }\n }\n } /* for each node with measure > 1 */\n } #pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_coarsen.c", "omp_pragma_line": "#pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "pts and F-pts.\n *------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (ig = 0; ig < graph_size; ig++) {\n i = graph_array[ig];\n\n /*---------------------------------------------\n\t* If the measure of i is smaller than 1, then\n * make i and F point (because it does not influence\n * any other point)\n\t*---------------------------------------------*/\n\n if(measure_array[i]<1.) 
CF_marker[i]= F_PT;\n\n /*---------------------------------------------\n\t* First treat the case where point i is in the\n\t* independent set: make i a C point, \n\t*---------------------------------------------*/\n \n if (CF_marker[i] > 0) CF_marker[i] = C_PT;\n\n /*---------------------------------------------\n\t* Now treat the case where point i is not in the\n\t* independent set: loop over\n\t* all the points j that influence equation i; if\n\t* j is a C point, then make i an F point.\n\t*---------------------------------------------*/\n\n else \n {\n\t /* first the local part */\n\t for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) \n {\n\t /* j is the column number, or the local number of the point influencing i */\n\t j = S_diag_j[jS];\n\t if (CF_marker[j] > 0) /* j is a C-point */\n\t CF_marker[i] = F_PT;\n\t }\n\t /* now the external part */\n\t for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) \n\t {\n\t j = S_offd_j[jS];\n\t if (CF_marker_offd[j] > 0) /* j is a C-point */\n\t CF_marker[i] = F_PT;\n\t }\n\n } /* end else */\n } #pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "coarse = hypre_CTAlloc(HYPRE_Int, n_fine);\n\n n_coarse = 0;\n n_SF = 0;\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_fine; i++)\n if (CF_marker[i] == 1) n_coarse++;\n else if (CF_marker[i] == -3) n_SF++;\n\n pass_array_size = n_fine-n_coarse-n_SF;\n if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size);\n pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1);\n if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine);\n P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);\n P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);\n if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse);\n\n if (num_cols_offd)\n {\n CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);\n if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);\n } #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "stroy(comm_handle);\n }\n }\n\n n_coarse_offd = 0;\n n_SF_offd = 0;\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_offd; i++)\n if (CF_marker_offd[i] == 1) n_coarse_offd++;\n else if (CF_marker_offd[i] == -3) n_SF_offd++;\n\n if (num_cols_offd)\n {\n assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);\n map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd);\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);\n new_col_map_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd);\n } #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "])\n P_ncols = hypre_CTAlloc(HYPRE_Int,send_map_start[num_sends]);\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_offd+1; i++)\n { Pext_i[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "dif \n for (i=0; i < num_cols_offd+1; i++)\n { Pext_i[i] = 0; }\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < send_map_start[num_sends]; i++)\n { P_ncols[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "pass][0] = 0;\n \n for (i=0; i < num_sends; i++)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (j=send_map_start[i]; j < send_map_start[i+1]; j++)\n {\n j1 = send_map_elmt[j];\n if (assigned[j1] == pass-1)\n {\n P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];\n Pext_send_size += P_ncols[j];\n }\n } #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "offd_i[i+1] += P_offd_i[i];\n }\n\n/* determine P for coarse points */\n \n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_coarse; i++)\n {\n i1 = C_array[i];\n P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];\n P_diag_data[P_diag_i[i1]] = 1.0;\n } #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " else\n { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); }\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < new_num_cols_offd; i++)\n { P_marker_offd[i] = 0; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "P);\n permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1]);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < new_counter[num_passes-1]; i++)\n permute[i] = -1;\n\n cnt = 0;\n for (i=0; i < num_passes-1; i++)\n {\n for (j=new_counter[i]; j < new_counter[i+1]; j++)\n {\n if (P_marker_offd[j])\n {\n col_map_offd_P[cnt] = new_elmts[i][j-new_counter[i]];\n permute[j] = col_map_offd_P[cnt++];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,k1) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n\n hypre_qsort0(col_map_offd_P,0,num_cols_offd_P-1);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < new_counter[num_passes-1]; i++)\n {\n k1 = permute[i];\n if (k1 != -1)\n permute[i] = hypre_BinarySearch(col_map_offd_P,k1,num_cols_offd_P);\n } #pragma omp parallel for private(i,k1) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "mute[i] = hypre_BinarySearch(col_map_offd_P,k1,num_cols_offd_P);\n }\n\n#ifdef 
HYPRE_USING_OPENMP\nfor (i=0; i < P_offd_size; i++)\n { P_offd_j[i] = permute[P_offd_j[i]]; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_multi_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;\n }\n\n if (n_SF)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n }\n\n if (num_procs > 1)\n {\n hypre_MatvecCommPkgCreate(P);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_lr_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r_offd, fine_to_coarse_offd);\n }\n\n hypre_MatvecCommPkgCreate(P);\n \n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n \n *P_ptr = P;\n\n /* Deallocate memory */ \n hypre_TFree(max_num_threads);\n hypre_TFree(fine_to_coarse);\n hypre_TFree(diag_offset);\n hypre_TFree(offd_offset);\n hypre_TFree(fine_to_coarse_offset);\n \n if (num_procs > 1) \n {\n hypre_CSRMatrixDestroy(Sop);\n hypre_CSRMatrixDestroy(A_ext);\n hypre_TFree(fine_to_coarse_offd);\n hypre_TFree(CF_marker_offd);\n hypre_TFree(tmp_CF_marker_offd);\n if(num_functions > 1)\n hypre_TFree(dof_func_offd);\n\n\n hypre_MatvecCommPkgDestroy(extend_comm_pkg);\n \n\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "CSRCommHandleDestroy(comm_handle);\n hypre_TFree(int_buf_data);\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (k = 0; k < num_threads; k++)\n {\n size = num_rows/num_threads;\n rest = num_rows - size*num_threads;\n if (k < rest)\n {\n ns = k*size+k;\n ne = (k+1)*size+k+1;\n }\n else\n {\n ns = k*size+rest;\n ne = (k+1)*size+rest;\n }\n\n if (option == 1)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the l1 norm of the diag part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n l1_norm[i] += fabs(A_diag_data[j]);\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n l1_norm[i] += fabs(A_offd_data[j]);\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the CF l1 norm of the diag part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n if (cf_diag == cf_marker[A_diag_J[j]])\n l1_norm[i] += fabs(A_diag_data[j]);\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n l1_norm[i] += fabs(A_offd_data[j]);\n }\n }\n }\n }\n else if (option == 2)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n {\n ii = A_diag_J[j];\n if (ii == i || ii < ns || ii >= ne)\n l1_norm[i] += fabs(A_diag_data[j]);\n }\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n l1_norm[i] += 
fabs(A_offd_data[j]);\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n {\n ii = A_diag_J[j];\n if ((ii == i || ii < ns || ii >= ne) &&\n (cf_diag == cf_marker[A_diag_J[j]]))\n l1_norm[i] += fabs(A_diag_data[j]);\n }\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n l1_norm[i] += fabs(A_offd_data[j]);\n }\n }\n }\n }\n else if (option == 3)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n l1_norm[i] += A_diag_data[j] * A_diag_data[j];\n if (num_cols_offd)\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n l1_norm[i] += A_offd_data[j] * A_offd_data[j];\n }\n }\n else if (option == 4)\n {\n for (i = ns; i < ne; i++)\n {\n l1_norm[i] = 0.0;\n if (cf_marker == NULL)\n {\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n {\n ii = A_diag_J[j];\n if (ii == i || ii < ns || ii >= ne)\n {\n if (ii == i)\n {\n diag = fabs(A_diag_data[j]);\n l1_norm[i] += fabs(A_diag_data[j]);\n }\n else\n l1_norm[i] += 0.5*fabs(A_diag_data[j]);\n }\n }\n /* Add the l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n l1_norm[i] += 0.5*fabs(A_offd_data[j]);\n }\n }\n else\n {\n cf_diag = cf_marker[i];\n /* Add the diagonal and the local off-thread part of the ith row */\n for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)\n {\n ii = A_diag_J[j];\n if ((ii == i || ii < ns || ii >= ne) &&\n (cf_diag == cf_marker[A_diag_J[j]]))\n {\n if (ii == i)\n {\n diag = fabs(A_diag_data[j]);\n l1_norm[i] += fabs(A_diag_data[j]);\n }\n else\n l1_norm[i] += 0.5*fabs(A_diag_data[j]);\n }\n }\n /* Add the CF l1 norm of the offd part of the ith row */\n if (num_cols_offd)\n {\n for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)\n if (cf_diag == cf_marker_offd[A_offd_J[j]])\n l1_norm[i] += 0.5*fabs(A_offd_data[j]);\n }\n }\n\n /* Truncate according to Remark 6.2 */\n if (l1_norm[i] <= 4.0/3.0*diag)\n l1_norm[i] = diag;\n }\n }\n\n /* Handle negative definite matrices */\n for (i = ns; i < ne; i++)\n if (A_diag_data[A_diag_I[i]] < 0)\n l1_norm[i] = -l1_norm[i];\n\n for (i = ns; i < ne; i++)\n /* if (fabs(l1_norm[i]) < DBL_EPSILON) */\n if (fabs(l1_norm[i]) == 0.0)\n {\n hypre_error_in_arg(1);\n break;\n }\n\n } #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " comm_handle = NULL;\n }\n\n if (relax_type == 1) /* Jacobi */\n {\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "r (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n }\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n 
*-----------------------------------------------------------*/\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += (relax_weight*res)/l1_norms[i];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "1 && omega == 1)\n {\n tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "dif\n for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && 
ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "mega*(1.0-relax_weight);\n tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n tmp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/ams.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "0; i < n; i++)\n {\n tmp_data[i] = u_data[i];\n }\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res2 = 0.0;\n res = f_data[i];\n Vtemp_data[i] = u_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n if (ii < i)\n res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += (c1*res + c2*res2) / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--) /* interior points first */\n {\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n if (ii > i)\n res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] 
+= (c1*res + c2*res2) / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " beta = gamma / gamma_old;\n \n /* p = s + beta p */\n#ifdef HYPRE_USING_OPENMP\nfor (j=0; j < local_size; j++)\n {\n p_data[j] = s_data[j] + beta*p_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": " hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);\n mult = coefs[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + v_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": " u_data[j] = mult * r_data[j] + v_data[j];\n }\n \n }\n\n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < num_rows; i++ ) \n {\n u_data[i] = orig_u[i] + u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "s_data and get scaled residual: r = D^(-1/2)f -\n * D^(-1/2)A*u */\n\n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_rows; j++)\n {\n diag = A_diag_data[A_diag_i[j]];\n ds_data[j] = 1/sqrt(diag);\n\n r_data[j] = ds_data[j] * f_data[j];\n } #pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "ta[j];\n }\n\n hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n r_data[j] += ds_data[j] * tmp_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "then start \n the iteration by multiplying r by the cheby coef.*/\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n orig_u[j] = u_data[j]; /* orig, unscaled u */\n\n u_data[j] = r_data[j] * coefs[cheby_order]; \n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "y_order - 1; i >= 0; i-- ) \n {\n /* v = D^(-1/2)AD^(-1/2)u */\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n tmp_data[j] = ds_data[j] * u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": 
"#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "ec, 0.0, v);\n\n /* u_new = coef*r + v*/\n mult = coefs[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n tmp_d = ds_data[j]* v_data[j];\n u_data[j] = mult * r_data[j] + tmp_d;\n } #pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "op */\n\n /* now we have to scale u_data before adding it to u_orig*/\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n u_data[j] = orig_u[j] + ds_data[j]*u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "*-----------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "---------------------------------*/\n \n if (relax_points == 0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n \n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += (relax_weight*res)/l1_norms[i];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax_more.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------------------------*/\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n \n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n && A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += (relax_weight * res)/l1_norms[i];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_nongalerkin.c", "omp_pragma_line": "#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": 
" S same nonzero structure as A */\n hypre_ParCSRMatrixCopy(A,S,1);\n \n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_variables; i++)\n {\n diag = A_diag_data[A_diag_i[i]];\n \n /* compute scaling factor and row sum */\n row_scale = 0.0;\n row_sum = diag;\n if (num_functions > 1)\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_max(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_max(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func[A_diag_j[jA]])\n {\n row_scale = hypre_min(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (dof_func[i] == dof_func_offd[A_offd_j[jA]])\n {\n row_scale = hypre_min(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n }\n else\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n row_scale = hypre_max(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n row_scale = hypre_max(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n row_scale = hypre_min(row_scale, A_diag_data[jA]);\n row_sum += A_diag_data[jA];\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n row_scale = hypre_min(row_scale, A_offd_data[jA]);\n row_sum += A_offd_data[jA];\n }\n }\n }\n \n /* compute row entries of S */\n S_diag_j[A_diag_i[i]] = -1;\n if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))\n {\n /* make all dependencies weak */\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n S_diag_j[jA] = -1;\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n S_offd_j[jA] = -1;\n }\n }\n else\n {\n if (num_functions > 1)\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (A_diag_data[jA] <= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (A_offd_data[jA] <= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (A_diag_data[jA] >= strength_threshold * row_scale\n || dof_func[i] != dof_func[A_diag_j[jA]])\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (A_offd_data[jA] >= strength_threshold * row_scale\n || dof_func[i] != dof_func_offd[A_offd_j[jA]])\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n }\n else\n {\n if (diag < 0)\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (A_diag_data[jA] <= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (A_offd_data[jA] <= strength_threshold * row_scale)\n {\n S_offd_j[jA] = -1;\n }\n }\n }\n else\n {\n for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)\n {\n if (A_diag_data[jA] >= strength_threshold * row_scale)\n {\n S_diag_j[jA] = -1;\n }\n }\n for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)\n {\n if (A_offd_data[jA] >= strength_threshold * row_scale)\n {\n 
S_offd_j[jA] = -1;\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_nongalerkin.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " Use drop-tolerance to compute new entries for sparsity pattern\n */\n/*#ifdef HYPRE_USING_OPENMP\n*/ \n for(i = 0; i < num_variables; i++)\n {\n global_row = i+first_col_diag_RAP;\n \n /* Compute the drop tolerance for this row, which is just\n * abs(max of row i)*droptol */\n max_entry = -1.0;\n for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)\n {\n if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )\n { max_entry = fabs(RAP_diag_data[j]); }\n }\n for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)\n {\n {\n if( max_entry < fabs(RAP_offd_data[j]) )\n { max_entry = fabs(RAP_offd_data[j]); }\n }\n }\n max_entry *= droptol;\n max_entry_offd = max_entry*collapse_beta;\n \n\n /* Loop over diag portion, adding all entries that are \"strong\" */\n for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)\n {\n if( fabs(RAP_diag_data[j]) > max_entry )\n {\n global_col = RAP_diag_j[j] + first_col_diag_RAP;\n/*#ifdef HYPRE_USING_OPENMP\n#pragma omp critical (IJAdd)\n\n{*/ \n /* For efficiency, we do a buffered IJAddToValues\n * A[global_row, global_col] += 1.0 */\n hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, \n &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, \n global_col, 1.0 ); \n if(sym_collapse)\n {\n hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,\n ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,\n &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,\n global_col, global_row, 1.0 );\n }\n/*}*/ \n }\n } \n\n /* Loop over offd portion, adding all entries that are \"strong\" */\n for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)\n {\n if( fabs(RAP_offd_data[j]) > max_entry_offd )\n {\n global_col = col_map_offd_RAP[ RAP_offd_j[j] ];\n/*#ifdef HYPRE_USING_OPENMP\n#pragma omp critical (IJAdd)\n\n{*/\n /* For efficiency, we do a buffered IJAddToValues\n * A[global_row, global_col] += 1.0 */\n hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, \n &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, \n global_col, 1.0 ); \n if(sym_collapse)\n {\n hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,\n ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,\n &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,\n global_col, global_row, 1.0 );\n }\n/*}*/ \n }\n }\n\n } #pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "(diagonal) */\n ds_data = hypre_CTAlloc(HYPRE_Real, num_rows);\n \n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_rows; j++)\n {\n diag = A_diag_data[A_diag_i[j]];\n ds_data[j] = 1/sqrt(diag);\n } #pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": " 
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);\n mult = coefs[i];\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + v_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": " u_data[j] = mult * r_data[j] + v_data[j];\n }\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor ( i = 0; i < num_rows; i++ ) \n {\n u_data[i] = orig_u[i] + u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "D^(-1/2)A*u */\n\n hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);\n\n orig_u[j] = u_data[j]; /* orig, unscaled u */\n\n u_data[j] = r_data[j] * coefs[cheby_order]; \n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "y_order - 1; i >= 0; i-- ) \n {\n /* v = D^(-1/2)AD^(-1/2)u */\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n tmp_data[j] = ds_data[j] * u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "ec, 0.0, v);\n\n /* u_new = coef*r + v*/\n mult = coefs[i];\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ )\n {\n u_data[j] = mult * r_data[j] + ds_data[j]*v_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_cheby.c", "omp_pragma_line": "#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE ", "context_chars": 100, "text": "op */\n\n /* now we have to scale u_data before adding it to u_orig*/\n\n#ifdef HYPRE_USING_OPENMP\nfor ( j = 0; j < num_rows; j++ ) \n {\n u_data[j] = orig_u[j] + ds_data[j]*u_data[j];\n } #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "p_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads());\n\nfor (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)\n {\n HYPRE_Int key = send_map_elmts_RT[i];\n hypre_UnorderedIntSetPut(&send_map_elmts_set, key);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "dIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads());\nfor (i = 0; i < send_map_elmts_unique_size; i++)\n {\n hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, 
send_map_elmts_unique[i], i);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT]);\n\nfor (i = 0; i < send_map_elmts_unique_size; i++)\n {\n send_map_elmts_starts_RT_aggregated[i] = 0;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_map_elmts_unique_size; i++)\n {\n send_map_elmts_starts_RT_aggregated[i] = 0;\n }\n\nfor (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)\n {\n HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);\n#pragma omp atomic\n send_map_elmts_starts_RT_aggregated[idx]++;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "end_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];\n\nfor (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)\n {\n HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);\n HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;\n send_map_elmts_RT_aggregated[offset] = i;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse);\n\nfor (i=0 ; i < P_ext_offd_size; i++)\n P_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_Pext_inverse, P_ext_offd_j[i]);\n if (num_cols_offd_Pext) hypre_UnorderedIntMapDestroy(&col_map_offd_Pext_inverse);\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n if (P_ext_offd_size || num_cols_offd_P)\n {\n temp = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size+num_cols_offd_P);\n for (i=0; i < P_ext_offd_size; i++)\n temp[i] = P_ext_offd_j[i];\n cnt = P_ext_offd_size;\n for (i=0; i < num_cols_offd_P; i++)\n temp[cnt++] = col_map_offd_P[i];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "m_cols_offd_RT)\n {\n jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_offd_RT/num_threads;\n rest = num_cols_offd_RT - size*num_threads;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n \n /*-----------------------------------------------------------------------\n * Allocate marker arrays.\n *-----------------------------------------------------------------------*/\n\n if (num_cols_offd_Pext || 
num_cols_diag_P)\n {\n P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext);\n P_marker = P_mark_array[ii];\n }\n A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A);\n A_marker = A_mark_array[ii];\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n\n jj_counter = start_indexing;\n for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)\n { \n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n { \n A_marker[i] = -1;\n } \n\n /*-----------------------------------------------------------------------\n * Loop over exterior c-points\n *-----------------------------------------------------------------------*/\n \n for (ic = ns; ic < ne; ic++)\n {\n \n jj_row_begining = jj_counter;\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_offd.\n *--------------------------------------------------------------------*/\n \n for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)\n {\n i1 = R_offd_j[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n }\n }\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. 
New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2+num_cols_offd_A] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2+num_cols_offd_A] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_offd.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n jj_counter++;\n }\n }\n }\n }\n }\n }\n\n jj_count[ii] = jj_counter;\n\n } #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_offd_RT/num_threads;\n rest = num_cols_offd_RT - size*num_threads;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n if (num_cols_offd_Pext || num_cols_diag_P)\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n\n jj_counter = start_indexing;\n if (ii > 0) jj_counter = jj_count[ii-1];\n\n for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)\n { \n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n { \n A_marker[i] = -1;\n } \n \n /*-----------------------------------------------------------------------\n * Loop over exterior c-points.\n *-----------------------------------------------------------------------*/\n \n for (ic = ns; ic < ne; ic++)\n {\n \n jj_row_begining = jj_counter;\n RAP_int_i[ic] = jj_counter;\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_offd.\n *--------------------------------------------------------------------*/\n \n for (jj1 = R_offd_i[ic]; 
jj1 < R_offd_i[ic+1]; jj1++)\n {\n i1 = R_offd_j[jj1];\n r_entry = R_offd_data[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n r_a_product = r_entry * A_offd_data[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n r_a_p_product = r_a_product * P_ext_diag_data[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] = i3 + first_col_diag_P;\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_ext_offd_data[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] \n = col_map_offd_Pext[i3-num_cols_diag_P];\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RAP and can just add new contributions.\n *--------------------------------------------------------------*/\n\n else\n {\n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n r_a_p_product = r_a_product * P_ext_diag_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)\n {\n i3 = P_ext_offd_j[jj3] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_ext_offd_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n r_a_product = r_entry * A_diag_data[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (A_marker[i2+num_cols_offd_A] != ic)\n {\n\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2+num_cols_offd_A] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n r_a_p_product = r_a_product * P_diag_data[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] = i3 + first_col_diag_P;\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_offd_data[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begining)\n {\n P_marker[i3] = jj_counter;\n RAP_int_data[jj_counter] = r_a_p_product;\n RAP_int_j[jj_counter] = \n col_map_offd_Pext[i3-num_cols_diag_P];\n jj_counter++;\n }\n else\n {\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RAP and can just add new contributions.\n *--------------------------------------------------------------*/\n\n else\n {\n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n r_a_p_product = r_a_product * P_diag_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)\n {\n i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;\n r_a_p_product = r_a_product * P_offd_data[jj3];\n RAP_int_data[P_marker[i3]] += r_a_p_product;\n }\n }\n }\n }\n }\n if (num_cols_offd_Pext || num_cols_diag_P)\n hypre_TFree(P_mark_array[ii]);\n hypre_TFree(A_mark_array[ii]);\n } #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < RAP_ext_size; i++)\n if (RAP_ext_j[i] < first_col_diag_RAP \n || RAP_ext_j[i] > last_col_diag_RAP)\n RAP_ext_j[i] = num_cols_diag_P\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n + hypre_UnorderedIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);\n#else\n + hypre_BinarySearch(col_map_offd_RAP,\n RAP_ext_j[i],num_cols_offd_RAP);\n\n else\n RAP_ext_j[i] -= first_col_diag_RAP;\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n if (num_cols_offd_RAP)\n hypre_UnorderedIntMapDestroy(&col_map_offd_RAP_inverse);\n\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();\n hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();\n\n\n/* need to allocate new P_marker etc. 
and make further changes */\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads);\n jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads);\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE\n\n for (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_diag_RT/num_threads;\n rest = num_cols_diag_RT - size*num_threads;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n\n P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP);\n A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A);\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n jj_count_diag = start_indexing;\n jj_count_offd = start_indexing;\n\n for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)\n { \n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n { \n A_marker[i] = -1;\n } \n\n /*-----------------------------------------------------------------------\n * Loop over interior c-points.\n *-----------------------------------------------------------------------*/\n \n for (ic = ns; ic < ne; ic++)\n {\n \n /*--------------------------------------------------------------------\n * Set marker for diagonal entry, RAP_{ic,ic}. and for all points\n * being added to row ic of RAP_diag and RAP_offd through RAP_ext\n *--------------------------------------------------------------------*/\n\n jj_row_begin_diag = jj_count_diag;\n jj_row_begin_offd = jj_count_offd;\n\n if (square)\n P_marker[ic] = jj_count_diag++;\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n if (send_map_elmts_RT_inverse_map_initialized)\n {\n HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);\n if (i != -1)\n {\n for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)\n {\n HYPRE_Int jj = send_map_elmts_RT_aggregated[j];\n for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n } // if (set)\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n for (i=0; i < num_sends_RT; i++)\n for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)\n if (send_map_elmts_RT[j] == ic)\n {\n for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n break;\n }\n /* !HYPRE_CONCURRENT_HOPSCOTCH */\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_diag.\n *--------------------------------------------------------------------*/\n \n for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)\n {\n i1 = R_diag_j[jj1];\n \n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n 
*-----------------------------------------------------------------*/\n \n if (num_cols_offd_A)\n {\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n \n if (A_marker[i2] != ic)\n {\n \n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n \n A_marker[i2] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n \n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)\n {\n i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n }\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n \n if (A_marker[i2+num_cols_offd_A] != ic)\n {\n \n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n \n A_marker[i2+num_cols_offd_A] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n \n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n \n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_offd.\n *-----------------------------------------------------------*/\n\n if (num_cols_offd_P)\n { \n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)\n {\n i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n \n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n } \n }\n }\n }\n \n /*--------------------------------------------------------------------\n * Set RAP_diag_i and RAP_offd_i for this row.\n *--------------------------------------------------------------------*/\n/* \n RAP_diag_i[ic] = jj_row_begin_diag;\n RAP_offd_i[ic] = jj_row_begin_offd;\n*/ \n }\n jj_cnt_diag[ii] = jj_count_diag;\n jj_cnt_offd[ii] = jj_count_offd;\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "t, num_threads);\n jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads);\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_diag_RT/num_threads;\n rest = num_cols_diag_RT - size*num_threads;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n\n P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP);\n A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A);\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n jj_count_diag = start_indexing;\n jj_count_offd = start_indexing;\n\n for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)\n { \n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A; i++)\n { \n A_marker[i] = -1;\n } \n\n /*-----------------------------------------------------------------------\n * Loop over interior c-points.\n *-----------------------------------------------------------------------*/\n \n for (ic = ns; ic < ne; ic++)\n {\n \n /*--------------------------------------------------------------------\n * Set marker for diagonal entry, RAP_{ic,ic}. 
and for all points\n * being added to row ic of RAP_diag and RAP_offd through RAP_ext\n *--------------------------------------------------------------------*/\n\n jj_row_begin_diag = jj_count_diag;\n jj_row_begin_offd = jj_count_offd;\n\n if (square)\n P_marker[ic] = jj_count_diag++;\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n if (send_map_elmts_RT_inverse_map_initialized)\n {\n HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);\n if (i != -1)\n {\n for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)\n {\n HYPRE_Int jj = send_map_elmts_RT_aggregated[j];\n for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n } // if (set)\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n for (i=0; i < num_sends_RT; i++)\n for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)\n if (send_map_elmts_RT[j] == ic)\n {\n for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n jj_count_diag++;\n }\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n break;\n }\n /* !HYPRE_CONCURRENT_HOPSCOTCH */\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_diag.\n *--------------------------------------------------------------------*/\n \n for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)\n {\n i1 = R_diag_j[jj1];\n \n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n \n if (num_cols_offd_A)\n {\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n \n if (A_marker[i2] != ic)\n {\n \n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n \n A_marker[i2] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_ext.\n *-----------------------------------------------------------*/\n \n for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)\n {\n i3 = P_ext_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)\n {\n i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n\n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n }\n }\n }\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n \n if (A_marker[i2+num_cols_offd_A] != ic)\n {\n \n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n \n A_marker[i2+num_cols_offd_A] = ic;\n \n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_diag.\n *-----------------------------------------------------------*/\n \n for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)\n {\n i3 = P_diag_j[jj3];\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n \n if (P_marker[i3] < jj_row_begin_diag)\n {\n P_marker[i3] = jj_count_diag;\n jj_count_diag++;\n }\n }\n /*-----------------------------------------------------------\n * Loop over entries in row i2 of P_offd.\n *-----------------------------------------------------------*/\n\n if (num_cols_offd_P)\n { \n for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)\n {\n i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;\n \n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i3} has not already\n * been accounted for. 
If it has not, mark it and increment\n * counter.\n *--------------------------------------------------------*/\n \n if (P_marker[i3] < jj_row_begin_offd)\n {\n P_marker[i3] = jj_count_offd;\n jj_count_offd++;\n }\n }\n } \n }\n }\n }\n \n /*--------------------------------------------------------------------\n * Set RAP_diag_i and RAP_offd_i for this row.\n *--------------------------------------------------------------------*/\n/* \n RAP_diag_i[ic] = jj_row_begin_diag;\n RAP_offd_i[ic] = jj_row_begin_offd;\n*/ \n }\n jj_cnt_diag[ii] = jj_count_diag;\n jj_cnt_offd[ii] = jj_count_offd;\n } #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "----------------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (ii = 0; ii < num_threads; ii++)\n {\n size = num_cols_diag_RT/num_threads;\n rest = num_cols_diag_RT - size*num_threads;\n if (ii < rest)\n {\n ns = ii*size+ii;\n ne = (ii+1)*size+ii+1;\n }\n else\n {\n ns = ii*size+rest;\n ne = (ii+1)*size+rest;\n }\n\n /*-----------------------------------------------------------------------\n * Initialize some stuff.\n *-----------------------------------------------------------------------*/\n\n P_marker = P_mark_array[ii];\n A_marker = A_mark_array[ii];\n for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)\n { \n P_marker[ic] = -1;\n }\n for (i = 0; i < num_nz_cols_A ; i++)\n { \n A_marker[i] = -1;\n } \n \n jj_count_diag = start_indexing;\n jj_count_offd = start_indexing;\n if (ii > 0)\n {\n jj_count_diag = jj_cnt_diag[ii-1];\n jj_count_offd = jj_cnt_offd[ii-1];\n }\n\n // temporal matrix RA = R*A\n // only need to store one row per thread because R*A and (R*A)*P are fused\n // into one loop.\n hypre_CSRMatrix RA_diag, RA_offd;\n RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii;\n RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii;\n RA_diag.num_nonzeros = 0;\n RA_offd.num_nonzeros = 0;\n\n if (num_cols_offd_A)\n {\n RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii;\n RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii;\n }\n\n /*-----------------------------------------------------------------------\n * Loop over interior c-points.\n *-----------------------------------------------------------------------*/\n \n for (ic = ns; ic < ne; ic++)\n {\n \n /*--------------------------------------------------------------------\n * Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext \n *--------------------------------------------------------------------*/\n\n jj_row_begin_diag = jj_count_diag;\n jj_row_begin_offd = jj_count_offd;\n RAP_diag_i[ic] = jj_row_begin_diag;\n RAP_offd_i[ic] = jj_row_begin_offd;\n\n HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;\n HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;\n\n if (square)\n {\n P_marker[ic] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = zero;\n RAP_diag_j[jj_count_diag] = ic;\n jj_count_diag++;\n }\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n if (send_map_elmts_RT_inverse_map_initialized)\n {\n HYPRE_Int i = 
hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);\n if (i != -1)\n {\n for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)\n {\n HYPRE_Int jj = send_map_elmts_RT_aggregated[j];\n for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n RAP_diag_data[jj_count_diag] \n = RAP_ext_data[k];\n RAP_diag_j[jj_count_diag] = jcol;\n jj_count_diag++;\n }\n else\n RAP_diag_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n RAP_offd_data[jj_count_offd] \n = RAP_ext_data[k];\n RAP_offd_j[jj_count_offd] \n = jcol-num_cols_diag_P;\n jj_count_offd++;\n }\n else\n RAP_offd_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n }\n }\n } // if (set)\n }\n#else /* !HYPRE_CONCURRENT_HOPSCOTCH */\n for (i=0; i < num_sends_RT; i++)\n for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)\n if (send_map_elmts_RT[j] == ic)\n {\n for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)\n {\n jcol = RAP_ext_j[k];\n if (jcol < num_cols_diag_P)\n {\n if (P_marker[jcol] < jj_row_begin_diag)\n {\n P_marker[jcol] = jj_count_diag;\n RAP_diag_data[jj_count_diag] \n = RAP_ext_data[k];\n RAP_diag_j[jj_count_diag] = jcol;\n jj_count_diag++;\n }\n else\n RAP_diag_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n else\n {\n if (P_marker[jcol] < jj_row_begin_offd)\n {\n P_marker[jcol] = jj_count_offd;\n RAP_offd_data[jj_count_offd] \n = RAP_ext_data[k];\n RAP_offd_j[jj_count_offd] \n = jcol-num_cols_diag_P;\n jj_count_offd++;\n }\n else\n RAP_offd_data[P_marker[jcol]]\n += RAP_ext_data[k];\n }\n }\n break;\n }\n /* !HYPRE_CONCURRENT_HOPSCOTCH */\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of R_diag and compute row ic of RA.\n *--------------------------------------------------------------------*/\n\n for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)\n {\n i1 = R_diag_j[jj1];\n r_entry = R_diag_data[jj1];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_offd.\n *-----------------------------------------------------------------*/\n \n if (num_cols_offd_A)\n {\n for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)\n {\n i2 = A_offd_j[jj2];\n HYPRE_Real a_entry = A_offd_data[jj2];\n HYPRE_Int marker = A_marker[i2];\n\n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. 
New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (marker < ra_row_begin_offd)\n {\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n\n A_marker[i2] = RA_offd.num_nonzeros;\n RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;\n RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;\n RA_offd.num_nonzeros++;\n }\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RA and can just add new contributions.\n *--------------------------------------------------------------*/\n else\n {\n RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;\n // JSP: compiler will more likely to generate FMA instructions\n // when we don't eliminate common subexpressions of\n // r_entry * A_offd_data[jj2] manually.\n }\n } // loop over entries in row i1 of A_offd\n } // num_cols_offd_A\n \n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of A_diag.\n *-----------------------------------------------------------------*/\n \n for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)\n {\n i2 = A_diag_j[jj2];\n HYPRE_Real a_entry = A_diag_data[jj2];\n HYPRE_Int marker = A_marker[i2+num_cols_offd_A];\n \n /*--------------------------------------------------------------\n * Check A_marker to see if point i2 has been previously\n * visited. New entries in RAP only occur from unmarked points.\n *--------------------------------------------------------------*/\n\n if (marker < ra_row_begin_diag)\n {\n /*-----------------------------------------------------------\n * Mark i2 as visited.\n *-----------------------------------------------------------*/\n A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros;\n RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;\n RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;\n RA_diag.num_nonzeros++;\n }\n /*--------------------------------------------------------------\n * If i2 is previously visited ( A_marker[12]=ic ) it yields\n * no new entries in RA and can just add new contributions.\n *--------------------------------------------------------------*/\n else\n {\n RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;\n }\n } // loop over entries in row i1 of A_diag\n } // loop over entries in row ic of R_diag\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of RA_offd.\n *--------------------------------------------------------------------*/\n\n for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)\n {\n i1 = RA_offd.j[jj1 - ra_row_begin_offd];\n r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];\n\n /*-----------------------------------------------------------\n * Loop over entries in row i1 of P_ext.\n *-----------------------------------------------------------*/\n for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++)\n {\n i2 = P_ext_diag_j[jj2];\n HYPRE_Real p_entry = P_ext_diag_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n if (marker < jj_row_begin_diag)\n {\n P_marker[i2] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = r_a_product * p_entry;\n RAP_diag_j[jj_count_diag] = i2;\n jj_count_diag++;\n }\n else\n RAP_diag_data[marker] += r_a_product * p_entry;\n }\n for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++)\n {\n i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;\n HYPRE_Real p_entry = P_ext_offd_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n if (marker < jj_row_begin_offd)\n {\n P_marker[i2] = jj_count_offd;\n RAP_offd_data[jj_count_offd] = r_a_product * p_entry;\n RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n RAP_offd_data[marker] += r_a_product * p_entry;\n }\n } // loop over entries in row ic of RA_offd\n\n /*--------------------------------------------------------------------\n * Loop over entries in row ic of RA_diag.\n *--------------------------------------------------------------------*/\n\n for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)\n {\n HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];\n HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];\n\n /*-----------------------------------------------------------------\n * Loop over entries in row i1 of P_diag.\n *-----------------------------------------------------------------*/\n for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++)\n {\n i2 = P_diag_j[jj2];\n HYPRE_Real p_entry = P_diag_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (marker < jj_row_begin_diag)\n {\n P_marker[i2] = jj_count_diag;\n RAP_diag_data[jj_count_diag] = r_a_product * p_entry;\n RAP_diag_j[jj_count_diag] = i2;\n jj_count_diag++;\n }\n else\n {\n RAP_diag_data[marker] += r_a_product * p_entry;\n }\n }\n if (num_cols_offd_P)\n {\n for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++)\n {\n i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;\n HYPRE_Real p_entry = P_offd_data[jj2];\n HYPRE_Int marker = P_marker[i2];\n\n /*--------------------------------------------------------\n * Check P_marker to see that RAP_{ic,i2} has not already\n * been accounted for. 
If it has not, create a new entry.\n * If it has, add new contribution.\n *--------------------------------------------------------*/\n\n if (marker < jj_row_begin_offd)\n {\n P_marker[i2] = jj_count_offd;\n RAP_offd_data[jj_count_offd] = r_a_product * p_entry;\n RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;\n jj_count_offd++;\n }\n else\n {\n RAP_offd_data[marker] += r_a_product * p_entry;\n }\n }\n } // num_cols_offd_P\n } // loop over entries in row ic of RA_diag.\n } // Loop over interior c-points.\n hypre_TFree(P_mark_array[ii]); \n hypre_TFree(A_mark_array[ii]); \n } #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "f necessary */\n\n P_marker = hypre_CTAlloc(HYPRE_Int,num_cols_offd_RAP);\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_offd_RAP; i++)\n P_marker[i] = -1;\n\n jj_count_offd = 0;\n#ifdef HYPRE_USING_ATOMIC\n#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE\n\n for (i=0; i < RAP_offd_size; i++)\n {\n i3 = RAP_offd_j[i];\n#ifdef HYPRE_USING_ATOMIC\n if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)\n {\n jj_count_offd++;\n }\n#else\n if (P_marker[i3])\n {\n P_marker[i3] = 0;\n jj_count_offd++;\n }\n\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " < num_cols_offd_RAP; i++)\n P_marker[i] = -1;\n\n jj_count_offd = 0;\n#ifdef HYPRE_USING_ATOMIC\nfor (i=0; i < RAP_offd_size; i++)\n {\n i3 = RAP_offd_j[i];\n#ifdef HYPRE_USING_ATOMIC\n if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)\n {\n jj_count_offd++;\n }\n#else\n if (P_marker[i3])\n {\n P_marker[i3] = 0;\n jj_count_offd++;\n }\n\n } #pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_rap.c", "omp_pragma_line": "#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];\n }\n \n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < RAP_offd_size; i++)\n {\n\t i3 = RAP_offd_j[i];\n\t RAP_offd_j[i] = P_marker[i3];\n } #pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ";\n HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\nfor (i = begin; i < end; ++i) {\n int_buf_data[i - begin] =\n IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);\n#ifdef HYPRE_USING_OPENMP\nfor (i = begin; i < end; 
++i) {\n int_buf_data[i - begin] =\n IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n HYPRE_Int i;\n\n /* Quicker initialization */\n if(offd_n < diag_n)\n {\n#ifdef HYPRE_USING_OPENMP\nfor(i = 0; i < offd_n; i++)\n {\n diag_ftc[i] = -1;\n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if(diag_pm != NULL)\n { diag_pm[i] = -1; }\n if(offd_pm != NULL)\n { offd_pm[i] = -1;}\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "_pm[i] = -1; }\n if(offd_pm != NULL)\n { offd_pm[i] = -1;}\n }\n#ifdef HYPRE_USING_OPENMP\nfor(i = offd_n; i < diag_n; i++)\n { \n diag_ftc[i] = -1;\n if(diag_pm != NULL)\n { diag_pm[i] = -1; }\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if(diag_pm != NULL)\n { diag_pm[i] = -1; }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor(i = 0; i < diag_n; i++)\n {\n diag_ftc[i] = -1;\n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if(diag_pm != NULL)\n { diag_pm[i] = -1;}\n if(offd_pm != NULL)\n { offd_pm[i] = -1;}\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "g_pm[i] = -1;}\n if(offd_pm != NULL)\n { offd_pm[i] = -1;}\n }\n#ifdef HYPRE_USING_OPENMP\nfor(i = diag_n; i < offd_n; i++)\n { \n offd_ftc[i] = -1;\n tmp_CF[i] = -1;\n if(offd_pm != NULL)\n { offd_pm[i] = -1;}\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "e;\n hypre_UnorderedIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());\n\nfor (i = 0; i < num_cols_A_offd; i++)\n {\n hypre_UnorderedIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(kk,k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ndif\n\n /* Set column indices for Sop and A_ext such that offd nodes are\n * negatively indexed */\nfor(i = 0; i < num_cols_A_offd; i++)\n {\n if (CF_marker_offd[i] < 0)\n {\n for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)\n {\n k1 = Sop_j[kk];\n if(k1 > -1 && (k1 < col_1 || k1 >= col_n))\n { \n got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);\n loc_col = got_loc + num_cols_A_offd;\n Sop_j[kk] = -loc_col - 1;\n }\n }\n for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)\n {\n k1 = A_ext_j[kk];\n if(k1 > -1 && (k1 < col_1 || k1 >= col_n))\n {\n got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);\n loc_col = got_loc + num_cols_A_offd;\n A_ext_j[kk] = -loc_col - 1;\n }\n }\n }\n } #pragma omp parallel 
for private(kk,k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_Int *int_buf_data = hypre_CTAlloc(HYPRE_Int, end);\n\n HYPRE_Int i;\n#ifdef HYPRE_USING_OPENMP\nfor (i = begin; i < end; ++i) {\n int_buf_data[i - begin] =\n IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "cNodes)\n P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes);\n \n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < full_off_procNodes; i++)\n P_marker[i] = 0;\n \n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\n\n /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if\n * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the\n * total number of times P_marker is set */\n#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE\n for (i=0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if(tmp_CF_marker_offd[index] >= 0)\n { P_marker[index] = 1; }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "ffd has i marked. num_cols_P_offd is then set to the\n * total number of times P_marker is set */\nfor (i=0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if(tmp_CF_marker_offd[index] >= 0)\n { P_marker[index] = 1; }\n } #pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);\n\n // find old idx -> new idx map\nfor (i = 0; i < full_off_procNodes; i++)\n P_marker[i] = hypre_UnorderedIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);\n\n if (num_cols_P_offd)\n {\n hypre_UnorderedIntMapDestroy(&col_map_offd_P_inverse);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/aux_interp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i]);\n\n if (num_cols_P_offd)\n {\n hypre_UnorderedIntMapDestroy(&col_map_offd_P_inverse);\n }\nfor(i = 0; i < P_offd_size; i++)\n P_offd_j[i] = P_marker[P_offd_j[i]];\n\n#else /* HYPRE_CONCURRENT_HOPSCOTCH */\n HYPRE_Int num_cols_P_offd = 0;\n HYPRE_Int j;\n for (i=0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if (!P_marker[index])\n {\n\t if(tmp_CF_marker_offd[index] >= 0)\n\t {\n\t num_cols_P_offd++;\n\t P_marker[index] = 1;\n\t }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n hypre_ParVectorCopy(F_array[fine_grid],Vtemp);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];\n }\n }\n\n else if (rlx_down != 18)\n {\n 
/*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/\n for (j=0; j < num_grid_sweeps[1]; j++)\n {\n hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],\n\t CF_marker_array[fine_grid], rlx_down,rlx_order,1,\n relax_weight[fine_grid], omega[fine_grid],\n l1_norms[level], U_array[fine_grid], Vtemp, Ztemp);\n hypre_ParVectorCopy(F_array[fine_grid],Vtemp);\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n hypre_ParVectorCopy(F_array[fine_grid],Vtemp);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n u_data[i] += v_data[i] / l1_norms_lvl[i];\n }\n }\n \n alpha = -1.0;\n beta = 1.0;\n hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],\n beta, Vtemp);\n\n alpha = 1.0;\n beta = 0.0;\n hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,\n beta,F_array[coarse_grid]);\n }\n else /* additive version */\n {\n hypre_ParVectorCopy(F_array[fine_grid],Vtemp);\n if (level == 0) /* compute residual */\n {\n hypre_ParVectorCopy(Vtemp, Rtilde);\n hypre_ParVectorCopy(U_array[fine_grid],Xtilde);\n }\n alpha = 1.0;\n beta = 0.0;\n hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,\n beta,F_array[coarse_grid]);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_add_cycle.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < n_global; i++)\n\t x_global[i] += D_inv[i]*r_global[i];\n }\n else\n {\n if (num_grid_sweeps[1] > 1)\n {\n n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));\n hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1);\n hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global); \n hypre_SeqVectorInitialize(Tmptilde_local);\n hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local; \n hypre_ParVectorOwnsData(Tmptilde) = 1;\n hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);\n hypre_ParVectorScale(2.0,Rtilde);\n hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);\n hypre_ParVectorDestroy(Tmptilde);\n }\n hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "-------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "---------------------------*/\n\n if (relax_points == 0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n 
if (A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= one_minus_weight; \n u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "---------------------------------------------*/\n\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * Vtemp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= one_minus_weight; \n u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "---------------------------*/\n\n if (relax_points == 0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * u_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "---------------------------------------------*/\n\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n res -= A_diag_data[jj] * u_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / 
A_diag_data[A_diag_i[i]];\n }\n } #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);\n#ifdef HYPRE_USING_OPENMP\nfor (i = begin; i < end; i++)\n {\n v_buf_data[i - begin]\n = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n\t if (num_threads > 1)\n {\n\t tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel 
for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n\t if (num_threads > 1)\n\t {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } \n\t }\n }\n }\n\telse\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n\t if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor 
(i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n res0 = 0.0;\n res2 = 0.0;\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n res0 = 0.0;\n res2 = 0.0;\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n\t if (num_threads > 1)\n\t {\n tmp_data = Ztemp_data;\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) 
HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "(num_threads > 1)\n {\n\t tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for 
private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n\t if (num_threads > 1)\n\t {\n\t tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n 
{\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n res -= A_diag_data[jj] * u_data[ii];\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } \n\t }\n }\n }\n\t else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "(num_threads > 1)\n {\n\t tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n res0 = 0.0;\n res2 = 0.0;\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += 
A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n res0 = 0.0;\n res2 = 0.0;\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n\t if (num_threads > 1)\n\t {\n\t tmp_data = hypre_CTAlloc(HYPRE_Real,n);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t 
else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n\t if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= 
A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n\t if (num_threads > 1)\n\t {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j 
< rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != 
zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] = res / A_diag_data[A_diag_i[i]];\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n } \n\t }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "{\n\t if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / 
A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if ( A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " {\n\t if (num_threads > 1)\n\t {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n 
/*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "#endif\n for (i = 0; i < n; i++)\n\t tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n\t {\n\t size = n/num_threads;\n\t rest = n - size*num_threads;\n\t if (j < rest)\n\t {\n\t ns = j*size+j;\n\t ne = (j+1)*size+j+1;\n\t }\n\t else\n\t {\n\t ns = j*size+rest;\n\t ne = (j+1)*size+rest;\n\t }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] 
+= relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n \n if (cf_marker[i] == relax_points \n\t\t\t\t&& A_diag_data[A_diag_i[i]] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n\t\t if (ii >= ns && ii < ne)\n\t\t {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n\t\t }\n\t\t else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n\t\t\tone_minus_omega*res2) / A_diag_data[A_diag_i[i]];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n\t\t\tone_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/\n }\n } \n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C 
or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp 
parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] 
+= omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] 
*= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = 
ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for 
private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n 
ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n 
res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res = f_data[i];\n for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] += res / l1_norms[i];\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n }\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n {\n Vtemp_data[i] = u_data[i];\n } #pragma 
omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--)\t/* interior points first */\n {\n\n /*-----------------------------------------------------------\n * If diagonal is nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if ( l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res0 -= A_diag_data[jj] * u_data[ii];\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (num_threads > 1)\n {\n tmp_data = Ztemp_data;\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; 
i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_relax.c", "omp_pragma_line": "#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " for (i = 0; i < n; i++)\n tmp_data[i] = u_data[i];\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n/num_threads;\n rest = n - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ne-1; i > ns-1; i--) /* relax interior points */\n {\n\n /*-----------------------------------------------------------\n * If i is of the right type ( C or F ) and diagonal is\n * nonzero, relax point i; otherwise, skip it.\n *-----------------------------------------------------------*/\n\n if (cf_marker[i] == relax_points\n && l1_norms[i] != zero)\n {\n res0 = 0.0;\n res2 = 0.0;\n res = f_data[i];\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n ii = A_diag_j[jj];\n if (ii >= ns && ii < ne)\n {\n res2 += A_diag_data[jj] * Vtemp_data[ii];\n res0 -= A_diag_data[jj] * u_data[ii];\n }\n else\n res -= A_diag_data[jj] * tmp_data[ii];\n }\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n ii = A_offd_j[jj];\n res -= A_offd_data[jj] * Vext_data[ii];\n }\n u_data[i] *= prod;\n u_data[i] += relax_weight*(omega*res + res0 +\n one_minus_omega*res2) / l1_norms[i];\n /*u_data[i] += omega*(relax_weight*res + res0 +\n one_minus_weight*res2) / l1_norms[i];*/\n }\n }\n } #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int, num_threads);\n\n fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;\n\n jj_counter = start_indexing;\n jj_counter_offd = start_indexing;\n \n 
/*-----------------------------------------------------------------------\n * Loop over fine grid.\n *-----------------------------------------------------------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd); \n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) coarse_shift = coarse_counter[j-1];\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n\tfine_to_coarse[i] += my_first_cpt+coarse_shift;\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "LL);\n }\n\n if (debug_flag==4) wall_time = time_getWallclockSeconds();\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n \tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n \tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n \n for (i = ns; i < ne; i++)\n {\n \n 
/*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n } \n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop 
over ith row of A. First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n \n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row && \n\t\t\t\t\t(sgn*A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n } \n\n if (sum != 0)\n\t {\n\t distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row \n\t\t\t\t&& (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n }\n else\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n }\n }\n \n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n\n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n \n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n\t\t\t sum += A_ext_data[jj1];\n }\n \n }\n\n }\n\n if (sum != 0)\n\t\t {\n\t\t distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n\t\t else\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n } \n\n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n hypre_printf(\" Warning! zero diagonal! 
Proc id %d row %d\\n\", my_id,i); \n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n \n }\n\n strong_f_marker--; \n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n \tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n \tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n } \n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; 
jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n \n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row && \n\t\t\t\t\t(sgn*A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n } \n\n if (sum != 0)\n\t {\n\t distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row \n\t\t\t\t&& (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n }\n else\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n }\n }\n \n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n\n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n \n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n\t\t\t sum += A_ext_data[jj1];\n }\n \n }\n\n }\n\n if (sum != 0)\n\t\t {\n\t\t distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n\t\t else\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n } \n\n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n hypre_printf(\" Warning! zero diagonal! 
Proc id %d row %d\\n\", my_id,i); \n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n \n }\n\n strong_f_marker--; \n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_size)\n {\n P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_A_offd; i++)\n\t P_marker[i] = 0;\n\n num_cols_P_offd = 0;\n for (i=0; i < P_offd_size; i++)\n {\n\t index = P_offd_j[i];\n\t if (!P_marker[index])\n\t {\n \t num_cols_P_offd++;\n \t P_marker[index] = 1;\n \t }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "marker[index]==0) index++;\n col_map_offd_P[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < P_offd_size; i++)\n\tP_offd_j[i] = hypre_BinarySearch(col_map_offd_P,\n\t\t\t\t\t P_offd_j[i],\n\t\t\t\t\t num_cols_P_offd);\n hypre_TFree(P_marker); \n }\n\n for (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n\n if (num_cols_P_offd)\n { \n \thypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n \thypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int, num_threads);\n\n fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;\n\n jj_counter = start_indexing;\n jj_counter_offd = start_indexing;\n \n /*-----------------------------------------------------------------------\n * Loop over fine grid.\n *-----------------------------------------------------------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd); \n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) coarse_shift = coarse_counter[j-1];\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n\tfine_to_coarse[i] += my_first_cpt+coarse_shift;\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "LL);\n }\n\n if (debug_flag==4) wall_time = time_getWallclockSeconds();\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is 
the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly influence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n \n else \n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row && \n\t\t\t\t\t(sgn*A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n } \n\n if (sum != 0)\n\t {\n\t distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row \n\t\t\t\t&& (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n }\n else\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n }\n \n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n \n else \n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n\t\t\t sum += A_ext_data[jj1];\n }\n \n }\n\n }\n\n if (sum != 0)\n\t\t {\n\t\t distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n\t\t else\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n \n }\n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for 
private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly influence i.\n * Note: currently no distribution to the diagonal in this case.\n *--------------------------------------------------------------*/\n \n else \n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row && \n\t\t\t\t\t(sgn*A_diag_data[jj1]) < 0)\n {\n sum += A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n } \n\n if (sum != 0)\n\t {\n\t distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (P_marker[i2] >= jj_begin_row \n\t\t\t\t&& (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n\n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (P_marker_offd[i2] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n }\n else\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n }\n \n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n *-----------------------------------------------------------*/\n \n else \n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n\t\t\t sum += A_ext_data[jj1];\n }\n \n }\n\n }\n\n if (sum != 0)\n\t\t {\n\t\t distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n\t\t\t\t&& (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n\t\t else\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n \n }\n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel 
for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_size)\n {\n P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_A_offd; i++)\n P_marker[i] = 0;\n \n num_cols_P_offd = 0;\n for (i=0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if (!P_marker[index])\n {\n num_cols_P_offd++;\n P_marker[index] = 1;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "marker[index]==0) index++;\n col_map_offd_P[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < P_offd_size; i++)\n\tP_offd_j[i] = hypre_BinarySearch(col_map_offd_P,\n\t\t\t\t\t P_offd_j[i],\n\t\t\t\t\t num_cols_P_offd);\n hypre_TFree(P_marker); \n }\n\n for (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n\n if (num_cols_P_offd)\n { \n \thypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int, num_threads);\n\n fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;\n\n jj_counter = start_indexing;\n jj_counter_offd = start_indexing;\n \n /*-----------------------------------------------------------------------\n * Loop over fine grid.\n *-----------------------------------------------------------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] > 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] > 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] > 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd); \n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) coarse_shift = coarse_counter[j-1];\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n\tfine_to_coarse[i] += my_first_cpt+coarse_shift;\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "LL);\n }\n\n if (debug_flag==4) wall_time = time_getWallclockSeconds();\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n 
*--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. First, the diagonal part of A */\n\t sum_N_pos = 0;\n\t sum_N_neg = 0;\n\t sum_P_pos = 0;\n\t sum_P_neg = 0;\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\t if (num_functions == 1 || dof_func[i1] == dof_func[i])\n\t { \n\t if (A_diag_data[jj] > 0)\n\t sum_N_pos += A_diag_data[jj];\n\t else\n\t sum_N_neg += A_diag_data[jj];\n\t } \n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n\t if (A_diag_data[jj] > 0)\n\t\t sum_P_pos += A_diag_data[jj];\n\t else\n\t\t sum_P_neg += A_diag_data[jj];\n }\n\n } \n \n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\t if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])\n\t { \n\t if (A_offd_data[jj] > 0)\n\t sum_N_pos += A_offd_data[jj];\n\t else\n\t sum_N_neg += A_offd_data[jj];\n\t } \n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n\t if (A_offd_data[jj] > 0)\n\t\t sum_P_pos += A_offd_data[jj];\n\t else\n\t\t sum_P_neg += A_offd_data[jj];\n }\n\n }\n } \n if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;\n if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n if (P_diag_data[jj]> 0)\n P_diag_data[jj] *= -beta;\n else\n P_diag_data[jj] *= -alfa;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n if (P_offd_data[jj]> 0)\n P_offd_data[jj] *= -beta;\n else\n P_offd_data[jj] *= -alfa;\n }\n \n }\n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 
= S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. First, the diagonal part of A */\n\t sum_N_pos = 0;\n\t sum_N_neg = 0;\n\t sum_P_pos = 0;\n\t sum_P_neg = 0;\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\t if (num_functions == 1 || dof_func[i1] == dof_func[i])\n\t { \n\t if (A_diag_data[jj] > 0)\n\t sum_N_pos += A_diag_data[jj];\n\t else\n\t sum_N_neg += A_diag_data[jj];\n\t } \n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n\t if (A_diag_data[jj] > 0)\n\t\t sum_P_pos += A_diag_data[jj];\n\t else\n\t\t sum_P_neg += A_diag_data[jj];\n }\n\n } \n \n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\t if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])\n\t { \n\t if (A_offd_data[jj] > 0)\n\t sum_N_pos += A_offd_data[jj];\n\t else\n\t sum_N_neg += A_offd_data[jj];\n\t } \n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n\t if (A_offd_data[jj] > 0)\n\t\t sum_P_pos += A_offd_data[jj];\n\t else\n\t\t sum_P_neg += A_offd_data[jj];\n }\n\n }\n } \n if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;\n if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;\n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n if (P_diag_data[jj]> 0)\n P_diag_data[jj] *= -beta;\n else\n P_diag_data[jj] *= -alfa;\n }\n\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n if (P_offd_data[jj]> 0)\n P_offd_data[jj] *= -beta;\n else\n P_offd_data[jj] *= -alfa;\n }\n \n }\n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_size)\n {\n P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_A_offd; i++)\n P_marker[i] = 0;\n \n num_cols_P_offd = 0;\n for (i=0; i < P_offd_size; i++)\n {\n index = P_offd_j[i];\n if (!P_marker[index])\n {\n num_cols_P_offd++;\n P_marker[index] = 1;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "marker[index]==0) index++;\n col_map_offd_P[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < P_offd_size; i++)\n\tP_offd_j[i] = hypre_BinarySearch(col_map_offd_P,\n\t\t\t\t\t P_offd_j[i],\n\t\t\t\t\t num_cols_P_offd);\n hypre_TFree(P_marker); \n }\n\n for (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n\n if (num_cols_P_offd)\n { \n \thypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "Int, num_threads);\n\n fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;\n\n jj_counter = 
start_indexing;\n jj_counter_offd = start_indexing;\n \n /*-----------------------------------------------------------------------\n * Loop over fine grid.\n *-----------------------------------------------------------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE\n\n for (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------*/\n\n/* RDF: this looks a little tricky, but doable */\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a C-point, interpolation is the identity. 
Also set up\n * mapping vector.\n *--------------------------------------------------------------------*/\n\n if (CF_marker[i] >= 0)\n {\n jj_count[j]++;\n fine_to_coarse[i] = coarse_counter[j];\n coarse_counter[j]++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, interpolation is from the C-points that\n * strongly influence i.\n *--------------------------------------------------------------------*/\n\n else\n {\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n if (CF_marker[i1] >= 0)\n {\n jj_count[j]++;\n }\n }\n\n if (num_procs > 1)\n {\n\t if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n if (CF_marker_offd[i1] >= 0)\n {\n jj_count_offd[j]++;\n }\n }\n }\n }\n }\n }\n } #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ");\n\n fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd); \n\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_threads; j++)\n {\n coarse_shift = 0;\n if (j > 0) coarse_shift = coarse_counter[j-1];\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (j < rest)\n {\n ns = j*size+j;\n ne = (j+1)*size+j+1;\n }\n else\n {\n ns = j*size+rest;\n ne = (j+1)*size+rest;\n }\n for (i = ns; i < ne; i++)\n\tfine_to_coarse[i] += my_first_cpt+coarse_shift;\n } #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "LL);\n }\n\n if (debug_flag==4) wall_time = time_getWallclockSeconds();\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;\n\n /*-----------------------------------------------------------------------\n * Loop over fine grid points.\n *-----------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE\n\n for (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n \n for (i = ns; i < ne; i++)\n {\n \n 
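The records in this stretch show the first of two passes over the fine grid: each thread increments only its own jj_count[j] and coarse_counter[j], and the later fill pass starts from jj_count[jl-1], which suggests the per-thread counts are converted into running offsets between the passes (that scan itself is not visible in these excerpts). A minimal, hypothetical sketch of the count-then-fill pattern, with illustrative names rather than hypre's, might look like:

#include <omp.h>
#include <stdlib.h>

/* Pass 1 counts per thread, a serial scan turns counts into offsets,
 * pass 2 writes into disjoint slices -- no atomics or locks needed. */
void count_then_fill(const int *keep, int n_fine, int **out, int *out_len)
{
    int num_threads = omp_get_max_threads();
    int *jj_count = calloc(num_threads, sizeof(int));
    int j;

#pragma omp parallel for schedule(static)
    for (j = 0; j < num_threads; j++) {          /* pass 1: per-thread counts */
        int size = n_fine / num_threads;
        int rest = n_fine - size * num_threads;
        int ns = (j < rest) ? j * size + j : j * size + rest;
        int ne = (j < rest) ? ns + size + 1 : ns + size;
        int i;
        for (i = ns; i < ne; i++)
            if (keep[i]) jj_count[j]++;
    }

    for (j = 1; j < num_threads; j++)            /* serial scan into offsets */
        jj_count[j] += jj_count[j - 1];
    *out_len = jj_count[num_threads - 1];
    *out = malloc((size_t)(*out_len) * sizeof(int));

#pragma omp parallel for schedule(static)
    for (j = 0; j < num_threads; j++) {          /* pass 2: independent fill */
        int size = n_fine / num_threads;
        int rest = n_fine - size * num_threads;
        int ns = (j < rest) ? j * size + j : j * size + rest;
        int ne = (j < rest) ? ns + size + 1 : ns + size;
        int pos = (j > 0) ? jj_count[j - 1] : 0;
        int i;
        for (i = ns; i < ne; i++)
            if (keep[i]) (*out)[pos++] = i;
    }

    free(jj_count);
}

Because pass 2 revisits exactly the same [ns, ne) ranges as pass 1, each thread writes only into its own slice of the output array.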
/*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n } \n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop 
over ith row of A. First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n \n HERE, we only want to distribut to points of the SAME function type \n\n *--------------------------------------------------------------*/\n \n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n \n if (P_marker[i2] >= jj_begin_row && \n (sgn*A_diag_data[jj1]) < 0 )\n {\n sum += A_diag_data[jj1];\n }\n }\n \n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n } \n\n if (sum != 0)\n\t {\n distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker[i2] >= jj_begin_row \n && (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n \n }\n \n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n \n }\n }\n else /* sum = 0 - only add to diag if the same function type */\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n }\n }\n \n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal. 
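Inside each jl iteration the surrounding records allocate P_marker (and P_marker_offd when num_cols_A_offd is nonzero) with hypre_CTAlloc, reset the entries to -1, and release them with hypre_TFree before the next iteration, so every thread works on private scratch storage. A stripped-down sketch of that idiom, with plain malloc/free standing in for the hypre allocators, is:

#include <stdlib.h>

/* Hypothetical sketch of the per-thread scratch-array idiom in the records:
 * each thread owns its marker array, so marker updates never race. */
void per_thread_markers(int n_fine, int num_threads)
{
    int jl;
#pragma omp parallel for schedule(static)
    for (jl = 0; jl < num_threads; jl++) {
        int *P_marker = malloc((size_t)n_fine * sizeof(int)); /* stands in for hypre_CTAlloc */
        int i;
        for (i = 0; i < n_fine; i++)
            P_marker[i] = -1;       /* reset, as in the records */
        /* ... row-wise interpolation work on this thread's block would go here ... */
        free(P_marker);             /* stands in for hypre_TFree */
    }
}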
(only if the same function type)\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n\n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n AGAIN, we only want to distribut to points of the SAME function type \n\n *-----------------------------------------------------------*/\n \n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n { \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n && (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n \n }\n }\n if (sum != 0)\n\t\t {\n distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n \n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n && (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n }\n\t\t else /* sum = 0 */\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == 
dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n } \n\n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n hypre_printf(\" Warning! zero diagonal! Proc id %d row %d\\n\", my_id,i); \n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n }\n\n strong_f_marker--; \n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "------------------------------------------------------------------*/\n \n#ifdef HYPRE_USING_OPENMP\nfor (jl = 0; jl < num_threads; jl++)\n {\n size = n_fine/num_threads;\n rest = n_fine - size*num_threads;\n if (jl < rest)\n {\n ns = jl*size+jl;\n ne = (jl+1)*size+jl+1;\n }\n else\n {\n ns = jl*size+rest;\n ne = (jl+1)*size+rest;\n }\n jj_counter = 0;\n if (jl > 0) jj_counter = jj_count[jl-1];\n jj_counter_offd = 0;\n if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];\n\n P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);\n if (num_cols_A_offd)\n\tP_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n else\n\tP_marker_offd = NULL;\n\n for (i = 0; i < n_fine; i++)\n { \n P_marker[i] = -1;\n }\n for (i = 0; i < num_cols_A_offd; i++)\n { \n P_marker_offd[i] = -1;\n }\n strong_f_marker = -2;\n \n for (i = ns; i < ne; i++)\n {\n \n /*--------------------------------------------------------------------\n * If i is a c-point, interpolation is the identity.\n *--------------------------------------------------------------------*/\n \n if (CF_marker[i] >= 0)\n {\n P_diag_i[i] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i];\n P_diag_data[jj_counter] = one;\n jj_counter++;\n }\n \n /*--------------------------------------------------------------------\n * If i is an F-point, build interpolation.\n *--------------------------------------------------------------------*/\n\n else\n { \n /* Diagonal part of P */\n P_diag_i[i] = jj_counter;\n jj_begin_row = jj_counter;\n\n for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)\n {\n i1 = S_diag_j[jj]; \n\n /*--------------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_diag_j\n * and initialize interpolation weight to zero.\n *--------------------------------------------------------------*/\n\n if (CF_marker[i1] >= 0)\n {\n P_marker[i1] = jj_counter;\n P_diag_j[jj_counter] = fine_to_coarse[i1];\n P_diag_data[jj_counter] = zero;\n jj_counter++;\n }\n\n /*--------------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n 
*--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n P_marker[i1] = strong_f_marker;\n } \n }\n jj_end_row = jj_counter;\n\n /* Off-Diagonal part of P */\n P_offd_i[i] = jj_counter_offd;\n jj_begin_row_offd = jj_counter_offd;\n\n\n if (num_procs > 1)\n {\n if (col_offd_S_to_A)\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = col_offd_S_to_A[S_offd_j[jj]]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n else\n {\n for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)\n {\n i1 = S_offd_j[jj]; \n\n /*-----------------------------------------------------------\n * If neighbor i1 is a C-point, set column number in P_offd_j\n * and initialize interpolation weight to zero.\n *-----------------------------------------------------------*/\n\n if (CF_marker_offd[i1] >= 0)\n {\n P_marker_offd[i1] = jj_counter_offd;\n /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/\n P_offd_j[jj_counter_offd] = i1;\n P_offd_data[jj_counter_offd] = zero;\n jj_counter_offd++;\n }\n\n /*-----------------------------------------------------------\n * If neighbor i1 is an F-point, mark it as a strong F-point\n * whose connection needs to be distributed.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n P_marker_offd[i1] = strong_f_marker;\n } \n }\n }\n }\n \n jj_end_row_offd = jj_counter_offd;\n \n diagonal = A_diag_data[A_diag_i[i]];\n\n \n /* Loop over ith row of A. 
First, the diagonal part of A */\n\n for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)\n {\n i1 = A_diag_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker[i1] >= jj_begin_row)\n {\n P_diag_data[P_marker[i1]] += A_diag_data[jj];\n }\n\n /*--------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n \n HERE, we only want to distribut to points of the SAME function type \n\n *--------------------------------------------------------------*/\n \n else if (P_marker[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *-----------------------------------------------------------*/\n\t sgn = 1;\n\t if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n \n if (P_marker[i2] >= jj_begin_row && \n (sgn*A_diag_data[jj1]) < 0 )\n {\n sum += A_diag_data[jj1];\n }\n }\n \n }\n\n /* Off-Diagonal block part of row i1 */ \n if (num_procs > 1)\n { \n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn*A_offd_data[jj1]) < 0)\n {\n sum += A_offd_data[jj1];\n }\n }\n }\n } \n\n if (sum != 0)\n\t {\n distribute = A_diag_data[jj] / sum;\n \n /*-----------------------------------------------------------\n * Loop over row of A for point i1 and do the distribution.\n *-----------------------------------------------------------*/\n\n /* Diagonal block part of row i1 */\n for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)\n {\n i2 = A_diag_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker[i2] >= jj_begin_row \n && (sgn*A_diag_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_diag_data[jj1];\n }\n }\n \n }\n \n /* Off-Diagonal block part of row i1 */\n if (num_procs > 1)\n {\n for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)\n {\n i2 = A_offd_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (P_marker_offd[i2] >= jj_begin_row_offd\n && (sgn*A_offd_data[jj1]) < 0)\n {\n P_offd_data[P_marker_offd[i2]] \n += distribute * A_offd_data[jj1]; \n }\n }\n }\n \n }\n }\n else /* sum = 0 - only add to diag if the same function type */\n {\n\t\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n }\n }\n \n /*--------------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal. (only if the same function type)\n *--------------------------------------------------------------*/\n\n else if (CF_marker[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func[i1])\n diagonal += A_diag_data[jj];\n } \n\n } \n \n\n /*----------------------------------------------------------------\n * Still looping over ith row of A. 
Next, loop over the \n * off-diagonal part of A \n *---------------------------------------------------------------*/\n\n if (num_procs > 1)\n {\n for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)\n {\n i1 = A_offd_j[jj];\n\n /*--------------------------------------------------------------\n * Case 1: neighbor i1 is a C-point and strongly influences i,\n * accumulate a_{i,i1} into the interpolation weight.\n *--------------------------------------------------------------*/\n\n if (P_marker_offd[i1] >= jj_begin_row_offd)\n {\n P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];\n }\n\n /*------------------------------------------------------------\n * Case 2: neighbor i1 is an F-point and strongly influences i,\n * distribute a_{i,i1} to C-points that strongly infuence i.\n * Note: currently no distribution to the diagonal in this case.\n\n AGAIN, we only want to distribut to points of the SAME function type \n\n *-----------------------------------------------------------*/\n \n else if (P_marker_offd[i1] == strong_f_marker)\n {\n sum = zero;\n \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and calculate the sum\n * of the connections to c-points that strongly influence i.\n *---------------------------------------------------------*/\n\n /* find row number */\n c_num = A_offd_j[jj];\n\n\t\t sgn = 1;\n\t\t if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;\n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n { \n if (i2 > -1)\n { \n /* in the diagonal block */\n if (P_marker[i2] >= jj_begin_row\n && (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n else \n { \n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n && (sgn*A_ext_data[jj1]) < 0)\n {\n sum += A_ext_data[jj1];\n }\n }\n \n }\n }\n if (sum != 0)\n\t\t {\n distribute = A_offd_data[jj] / sum; \n /*---------------------------------------------------------\n * Loop over row of A_ext for point i1 and do \n * the distribution.\n *--------------------------------------------------------*/\n \n /* Diagonal block part of row i1 */\n \n for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)\n {\n i2 = A_ext_j[jj1];\n if (num_functions == 1 || dof_func[i1] == dof_func[i2])\n {\n if (i2 > -1) /* in the diagonal block */ \n {\n if (P_marker[i2] >= jj_begin_row\n && (sgn*A_ext_data[jj1]) < 0)\n {\n P_diag_data[P_marker[i2]]\n += distribute * A_ext_data[jj1];\n }\n }\n else\n {\n /* in the off_diagonal block */\n if (P_marker_offd[-i2-1] >= jj_begin_row_offd\n && (sgn*A_ext_data[jj1]) < 0)\n P_offd_data[P_marker_offd[-i2-1]]\n += distribute * A_ext_data[jj1];\n }\n }\n }\n }\n\t\t else /* sum = 0 */\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n }\n }\n \n /*-----------------------------------------------------------\n * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}\n * into the diagonal.\n *-----------------------------------------------------------*/\n\n else if (CF_marker_offd[i1] != -3)\n {\n\t if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])\n diagonal += A_offd_data[jj];\n } \n\n }\n } \n\n /*-----------------------------------------------------------------\n * Set interpolation weight by dividing by the diagonal.\n *-----------------------------------------------------------------*/\n\n if (diagonal == 0.0)\n {\n if (print_level)\n hypre_printf(\" Warning! zero diagonal! 
Proc id %d row %d\\n\", my_id,i); \n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] = 0.0;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] = 0.0;\n }\n }\n else\n {\n for (jj = jj_begin_row; jj < jj_end_row; jj++)\n {\n P_diag_data[jj] /= -diagonal;\n }\n for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)\n {\n P_offd_data[jj] /= -diagonal;\n }\n }\n }\n\n strong_f_marker--; \n\n P_offd_i[i+1] = jj_counter_offd;\n }\n hypre_TFree(P_marker);\n hypre_TFree(P_marker_offd);\n } #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "d_size)\n {\n P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < num_cols_A_offd; i++)\n\t P_marker[i] = 0;\n\n num_cols_P_offd = 0;\n for (i=0; i < P_offd_size; i++)\n {\n\t index = P_offd_j[i];\n\t if (!P_marker[index])\n\t {\n \t num_cols_P_offd++;\n \t P_marker[index] = 1;\n \t }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/parcsr_ls/par_interp.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "marker[index]==0) index++;\n col_map_offd_P[i] = index++;\n }\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < P_offd_size; i++)\n\tP_offd_j[i] = hypre_BinarySearch(col_map_offd_P,\n\t\t\t\t\t P_offd_j[i],\n\t\t\t\t\t num_cols_P_offd);\n hypre_TFree(P_marker); \n }\n\n for (i=0; i < n_fine; i++)\n if (CF_marker[i] == -3) CF_marker[i] = -1;\n\n if (num_cols_P_offd)\n { \n \thypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;\n \thypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/utilities/hypre_merge_sort.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "n, out);\n hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());\n HYPRE_Int i;\nfor (i = 0; i < len; i++)\n {\n HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);\n assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);\n#ifdef DBG_MERGE_SORT\n if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)\n {\n fprintf(stderr, \"%d %d\\n\", i, (*out)[i]);\n assert(false);\n }\n\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/utilities/hypre_hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " num_buckets);\n s->hash = hypre_TAlloc(HYPRE_Int, num_buckets);\n\n#ifdef HYPRE_CONCURRENT_HOPSCOTCH\nfor (i = 0; i < num_buckets; ++i)\n {\n s->hopInfo[i] = 0;\n s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/utilities/hypre_hopscotch_hash.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n\n m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets);\n\n#ifdef 
HYPRE_CONCURRENT_HOPSCOTCH\nfor (i = 0; i < num_buckets; i++)\n {\n InitBucket(&m->table[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/test/amg.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "HYPRE_Int, local_size);\n data = hypre_CTAlloc(HYPRE_Real, local_size);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i < local_size; i++)\n {\n num_cols[i] = 1;\n row_nums[i] = first_row+i;\n col_nums[i] = first_row+i;\n data[i] = eps;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "RE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);\n\n HYPRE_Int i, j;\n\n#ifdef HYPRE_USING_OPENMP\nfor (i=0; i <= num_rows; i++)\n {\n B_i[i] = A_i[i];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "\n#endif\n for (i=0; i <= num_rows; i++)\n {\n B_i[i] = A_i[i];\n }\n#ifdef HYPRE_USING_OPENMP\nfor (j = 0; j < num_nonzeros; ++j)\n {\n B_j[j] = A_j[j];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matrix.c", "omp_pragma_line": "#pragma omp parallel for HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " A_data = hypre_CSRMatrixData(A);\n B_data = hypre_CSRMatrixData(B);\n#ifdef HYPRE_USING_OPENMP\nfor (j=0; j < num_nonzeros; j++)\n {\n B_data[j] = A_data[j];\n } #pragma omp parallel for HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " \n HYPRE_Int ierr = 0;\n\n size *=hypre_VectorNumVectors(v);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n vector_data[i] = value;\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorSetRandomValues\n *\n * returns vector of values randomly distributed between -1.0 and +1.0\n *--------------------------------------------------------------------------*/\n\nHYPRE_Int\nhypre_SeqVectorSetRandomValues( hypre_Vector *v,\n HYPRE_Int seed )\n{\n HYPRE_Complex *vector_data = hypre_VectorData(v);\n HYPRE_Int size = hypre_VectorSize(v);\n \n HYPRE_Int i;\n \n HYPRE_Int ierr = 0;\n hypre_SeedRand(seed);\n\n size *=hypre_VectorNumVectors(v);\n\n/* RDF: threading this loop may cause problems because of hypre_Rand() */\n for (i = 0; i < size; i++)\n vector_data[i] = 2.0 * hypre_Rand() - 1.0;\n\n return ierr;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (size > size_y) size = size_y;\n size *=hypre_VectorNumVectors(x);\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] = x_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return 
ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorCloneDeep\n * Returns a complete copy of x - a deep copy, with its own copy of the data.\n *--------------------------------------------------------------------------*/\n\nhypre_Vector *\nhypre_SeqVectorCloneDeep( hypre_Vector *x )\n{\n HYPRE_Int size = hypre_VectorSize(x);\n HYPRE_Int num_vectors = hypre_VectorNumVectors(x);\n hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );\n\n hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);\n hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);\n hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);\n\n hypre_SeqVectorInitialize(y);\n hypre_SeqVectorCopy( x, y );\n\n return y;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " \n HYPRE_Int ierr = 0;\n\n size *=hypre_VectorNumVectors(y);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] *= alpha;\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorAxpy\n *--------------------------------------------------------------------------*/\n\nHYPRE_Int\nhypre_SeqVectorAxpy( HYPRE_Complex alpha,\n hypre_Vector *x,\n hypre_Vector *y )\n{\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();\n\n\n HYPRE_Complex *x_data = hypre_VectorData(x);\n HYPRE_Complex *y_data = hypre_VectorData(y);\n HYPRE_Int size = hypre_VectorSize(x);\n \n HYPRE_Int i;\n \n HYPRE_Int ierr = 0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < size; i++)\n y_data[i] += alpha * x_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return ierr;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " \n HYPRE_Int ierr = 0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n y_data[i] += alpha * x_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_SeqVectorInnerProd\n *--------------------------------------------------------------------------*/\n\nHYPRE_Real hypre_SeqVectorInnerProd( hypre_Vector *x,\n hypre_Vector *y )\n{\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();\n\n\n HYPRE_Complex *x_data = hypre_VectorData(x);\n HYPRE_Complex *y_data = hypre_VectorData(y);\n HYPRE_Int size = hypre_VectorSize(x);\n \n HYPRE_Int i;\n\n HYPRE_Real result = 0.0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < size; i++)\n result += hypre_conj(y_data[i]) * x_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return 
result;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/vector.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": ";\n\n HYPRE_Real result = 0.0;\n\n size *=hypre_VectorNumVectors(x);\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < size; i++)\n result += hypre_conj(y_data[i]) * x_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();\n\n\n return result;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_VectorSumElts:\n * Returns the sum of all vector elements.\n *--------------------------------------------------------------------------*/\n\nHYPRE_Complex hypre_VectorSumElts( hypre_Vector *vector )\n{\n HYPRE_Complex sum = 0;\n HYPRE_Complex *data = hypre_VectorData( vector );\n HYPRE_Int size = hypre_VectorSize( vector );\n HYPRE_Int i;\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE\n\n for ( i=0; i #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] = beta*b_data[i];\n\n#ifdef HYPRE_PROFILE\n hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;\n\n\n return ierr;\n }\n\n if (x == y)\n {\n x_tmp = hypre_SeqVectorCloneDeep(x);\n x_data = hypre_VectorData(x_tmp);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] = b_data[i]*temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " i++)\n y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] = b_data[i]*temp;\n }\n }\n else\n {\n for (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] = b_data[i];\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "----------------------*/\n\n if (num_rownnz < xpar*(num_rows))\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rownnz; i++)\n {\n m = A_rownnz[i];\n\n /*\n * for (jj = A_i[m]; jj < A_i[m+1]; jj++)\n * {\n * j = A_j[jj];\n * y_data[m] += A_data[jj] * x_data[j];\n * } */\n if ( num_vectors==1 )\n {\n tempx = 0;\n for (jj = A_i[m]; jj < A_i[m+1]; jj++)\n 
tempx += A_data[jj] * x_data[A_j[jj]];\n y_data[m] += tempx;\n }\n else\n for ( j=0; j #pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " }\n }\n }\n else // num_vectors > 1\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n for (j = 0; j < num_vectors; ++j)\n {\n tempx = 0;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];\n }\n y_data[ j*vecstride_y + i*idxstride_y ] += tempx;\n }\n } #pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows*num_vectors; i++)\n y_data[i] *= alpha;\n }\n }\n else\n { // JSP: this is currently the only path optimized\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel private(i,jj,tempx)\n\n {\n HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);\n HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);\n hypre_assert(iBegin <= iEnd);\n hypre_assert(iBegin >= 0 && iBegin <= num_rows);\n hypre_assert(iEnd >= 0 && iEnd <= num_rows);\n\n if (0 == temp)\n {\n if (1 == alpha) // JSP: a common path\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = 0.0;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = A*x\n else if (-1 == alpha)\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = 0.0;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx -= A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = -A*x\n else\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = 0.0;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = alpha*tempx;\n }\n } // y = alpha*A*x\n } // temp == 0\n else if (-1 == temp) // beta == -alpha\n {\n if (1 == alpha) // JSP: a common path\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = -b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = A*x - y\n else if (-1 == alpha) // JSP: a common path\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx -= A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = -A*x + y\n else\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = -b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = alpha*tempx;\n }\n } // y = alpha*(A*x - y)\n } // temp == -1\n else if (1 == temp)\n {\n if (1 == alpha) // JSP: a common path\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = A*x + y\n else if (-1 == alpha)\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = -b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx -= A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = -A*x - y\n else\n {\n for (i = iBegin; i < iEnd; i++)\n {\n 
tempx = b_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = alpha*tempx;\n }\n } // y = alpha*(A*x + y)\n }\n else\n {\n if (1 == alpha) // JSP: a common path\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = b_data[i]*temp;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = A*x + temp*y\n else if (-1 == alpha)\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = -b_data[i]*temp;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx -= A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = tempx;\n }\n } // y = -A*x - temp*y\n else\n {\n for (i = iBegin; i < iEnd; i++)\n {\n tempx = b_data[i]*temp;\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n tempx += A_data[jj] * x_data[A_j[jj]];\n }\n y_data[i] = alpha*tempx;\n }\n } // y = alpha*(A*x + temp*y)\n } // temp != 0 && temp != -1 && temp != 1\n } // omp parallel\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n y_data[i] *= beta;\n\n return ierr;\n }\n\n if (x == y)\n {\n x_tmp = hypre_SeqVectorCloneDeep(x);\n x_data = hypre_VectorData(x_tmp);\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "beta / alpha;\n \n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_cols*num_vectors; i++)\n y_data[i] *= temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "num_vectors; i++)\n y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A^T*x\n *-----------------------------------------------------------------*/\n num_threads = hypre_NumThreads();\n if (num_threads > 1)\n {\n y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size);\n\n if ( num_vectors==1 )\n {\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel private(i,jj,j,my_thread_num,offset)\n\n { \n my_thread_num = hypre_GetThreadNum();\n offset = y_size*my_thread_num;\n#ifdef HYPRE_USING_OPENMP\n#pragma omp for HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n {\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n {\n j = A_j[jj];\n y_data_expand[offset + j] += A_data[jj] * x_data[i];\n }\n }\n\n /* implied barrier (for threads)*/ \n#ifdef HYPRE_USING_OPENMP\n#pragma omp for HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < y_size; i++)\n {\n for (j = 0; j < num_threads; j++)\n {\n y_data[i] += y_data_expand[j*y_size + i];\n \n }\n }\n\n } /* end parallel threaded region */\n }\n else\n {\n /* multiple vector case 
is not threaded */\n for (i = 0; i < num_rows; i++)\n {\n for ( jv=0; jv #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_cols*num_vectors; i++)\n y_data[i] *= alpha;\n }\n\n if (x == y) hypre_SeqVectorDestroy(x_tmp);\n\n return ierr;\n}\n\n/*--------------------------------------------------------------------------\n * hypre_CSRMatrixMatvec_FF\n *--------------------------------------------------------------------------*/\n\nHYPRE_Int\nhypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,\n hypre_CSRMatrix *A,\n hypre_Vector *x,\n HYPRE_Complex beta,\n hypre_Vector *y,\n HYPRE_Int *CF_marker_x,\n HYPRE_Int *CF_marker_y,\n HYPRE_Int fpt )\n{\n HYPRE_Complex *A_data = hypre_CSRMatrixData(A);\n HYPRE_Int *A_i = hypre_CSRMatrixI(A);\n HYPRE_Int *A_j = hypre_CSRMatrixJ(A);\n HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);\n HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);\n\n HYPRE_Complex *x_data = hypre_VectorData(x);\n HYPRE_Complex *y_data = hypre_VectorData(y);\n HYPRE_Int x_size = hypre_VectorSize(x);\n HYPRE_Int y_size = hypre_VectorSize(y);\n\n HYPRE_Complex temp;\n\n HYPRE_Int i, jj;\n\n HYPRE_Int ierr = 0;\n\n /*---------------------------------------------------------------------\n * Check for size compatibility. Matvec returns ierr = 1 if\n * length of X doesn't equal the number of columns of A,\n * ierr = 2 if the length of Y doesn't equal the number of rows\n * of A, and ierr = 3 if both are true.\n *\n * Because temporary vectors are often used in Matvec, none of\n * these conditions terminates processing, and the ierr flag\n * is informational only.\n *--------------------------------------------------------------------*/\n\n if (num_cols != x_size)\n ierr = 1;\n\n if (num_rows != y_size)\n ierr = 2;\n\n if (num_cols != x_size && num_rows != y_size)\n ierr = 3;\n\n /*-----------------------------------------------------------------------\n * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS\n *-----------------------------------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= beta;\n\n return ierr;\n }\n\n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n\n temp = beta / alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A*x\n *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE\n\n\n for (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if 
(CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y = alpha*y\n *-----------------------------------------------------------------*/\n\n if (alpha != 1.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= alpha;\n }\n\n return ierr;\n} #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": "--------------------------------------------*/\n\n if (alpha == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= beta;\n\n return ierr;\n }\n\n /*-----------------------------------------------------------------------\n * y = (beta/alpha)*y\n *-----------------------------------------------------------------------*/\n\n temp = beta / alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " = beta / alpha;\n\n if (temp != 1.0)\n {\n if (temp == 0.0)\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE\n\n for (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " if (CF_marker_x[i] == fpt) y_data[i] = 0.0;\n }\n else\n {\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n if (CF_marker_x[i] == fpt) y_data[i] *= temp;\n }\n }\n\n /*-----------------------------------------------------------------\n * y += A*x\n *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\n#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE\n\n\n for (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AMG/seq_mv/csr_matvec.c", "omp_pragma_line": "#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE", "context_chars": 100, "text": " *-----------------------------------------------------------------*/\n\n#ifdef HYPRE_USING_OPENMP\nfor (i = 0; i < num_rows; i++)\n {\n if (CF_marker_x[i] == fpt)\n {\n temp = 
y_data[i];\n for (jj = A_i[i]; jj < A_i[i+1]; jj++)\n if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];\n y_data[i] = temp;\n }\n } #pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": " xx, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n/* Initialize initial condition and RHS */\n//for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)", "context_chars": 100, "text": "es \nc-------------------------------------------------------------------*/\n //for (j = 1; j < grid_points[1]-1; j++) {\n eta = (double)j * dnym1;\n \n for (k = 1; k < grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (i = 0; i < grid_points[0]; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[i][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[i] = buf[i][1] * buf[i][1];\n\tbuf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + \n\t buf[i][3] * buf[i][3];\n\tq[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +\n\t\t buf[i][3]*ue[i][3]);\n }\n \n for (i = 1; i < grid_points[0]-1; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tx2*(ue[ip1][1]-ue[im1][1])+\n\t dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-\n\t\t (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+\n\t xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+\n\t dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+\n\t xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+\n\t dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);\n \n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+\n\t xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+\n\t dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-\n\t buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+\n\t dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n\n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);\n\ti = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp 
*\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m]);\n\ti = grid_points[0]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);\n }\n\n }\n } #pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, zeta, eta) ", "context_chars": 100, "text": "differences \nc-------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (k = 1; k < grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (j = 0; j < grid_points[1]; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t}\n \n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[j][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[j] = buf[j][2] * buf[j][2];\n\tbuf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + \n\t buf[j][3] * buf[j][3];\n\tq[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +\n\t\t buf[j][3]*ue[j][3]);\n }\n\n for (j = 1; j < grid_points[1]-1; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t ty2*( ue[jp1][2]-ue[jm1][2] )+\n\t dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+\n\t yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+\n\t dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-\n\t (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+\n\t yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+\n\t dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+\n\t yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+\n\t dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-\n\t buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+\n buf[jm1][0])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+\n\t dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);\n\tj = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 
4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m]);\n\tj = grid_points[1]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);\n }\n\n }\n } #pragma omp parallel for private(xi, zeta, eta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, eta, zeta)", "context_chars": 100, "text": "es \nc-------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (j = 1; j < grid_points[1]-1; j++) {\n eta = (double)j * dnym1;\n\n for (k = 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[k][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[k] = buf[k][3] * buf[k][3];\n\tbuf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + \n\t buf[k][2] * buf[k][2];\n\tq[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +\n\t\t buf[k][3]*ue[k][3]);\n }\n\n for (k = 1; k < grid_points[2]-1; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tz2*( ue[kp1][3]-ue[km1][3] )+\n\t dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+\n\t zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+\n\t dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+\n\t zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+\n\t dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-\n\t\t (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+\n\t zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+\n\t dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-\n\t\t buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]\n +buf[km1][0])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+\n\t dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);\n\tk = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m]);\n\tk = grid_points[2]-2;\n\tforcing[i][j][k][m] = 
forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);\n }\n\n }\n } #pragma omp parallel for private(xi, eta, zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3) ", "context_chars": 100, "text": "led f) and s jacobians\nc---------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[i][j][k][0][0] = 0.0;\n\tfjac[i][j][k][0][1] = 0.0;\n\tfjac[i][j][k][0][2] = 0.0;\n\tfjac[i][j][k][0][3] = 1.0;\n\tfjac[i][j][k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) \n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][1][2] = 0.0;\n\tfjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][2][1] = 0.0;\n\tfjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) \n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );\n\tfjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][3] = ( 2.0 - c2 )\n\t * u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][4] = c2;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )\n\t - 0.50 * c2\n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + u[i][j][k][2]*u[i][j][k][2]\n\t\t + 3.0*u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * 
u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 )* tmp1;\n\n }\n }\n } #pragma omp parallel for private(tmp1, tmp2, tmp3) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n int i, j, k, d, m;\n double add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n }\n\n //for (i = 0; i <= grid_points[0]-2; i++) {\n //#pragma omp parallel for \n for (j = 0; j <= grid_points[1]-2; j++) {\n //#pragma omp parallel for \n for (k = 0; k <= grid_points[2]-2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " rms[m] = 0.0;\n }\n\n //#pragma omp parallel for \n for (i = 0; i <= grid_points[0]-2; i++) {\n //for (j = 0; j <= grid_points[1]-2; j++) {\n //#pragma omp parallel for \n for (k = 0; k <= grid_points[2]-2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0]-2; i++) {\n //#pragma omp parallel for \n for (j = 0; j <= grid_points[1]-2; j++) {\n //for (k = 0; k <= grid_points[2]-2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/sequential/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) ", "context_chars": 100, "text": " double qq[NQ];\t\t/* private copy of q[0:NQ-1] */\n\n for (i = 0; i < NQ; i++) qq[i] = 0.0;\n\n //for (k = 1; k <= np; k++) {\n\tkk = k_offset + k;\n\tt1 = S;\n\tt2 = an;\n\n/* Find starting seed t1 for this kk. */\n\n\tfor (i = 1; i <= 100; i++) {\n ik = kk / 2;\n if (2 * ik != kk) t3 = randlc(&t1, t2);\n if (ik == 0) break;\n t3 = randlc(&t2, t2);\n kk = ik;\n\t}\n\n/* Compute uniform pseudorandom numbers. */\n\n\tif (TIMERS_ENABLED == TRUE) timer_start(3);\n\tvranlc(2*NK, &t1, A, x-1);\n\tif (TIMERS_ENABLED == TRUE) timer_stop(3);\n\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. 
This loop is not \nc vectorizable.\n*/\n\tif (TIMERS_ENABLED == TRUE) timer_start(2);\n\n\tfor ( i = 0; i < NK; i++) {\n x1 = 2.0 * x[2*i] - 1.0;\n x2 = 2.0 * x[2*i+1] - 1.0;\n t1 = pow2(x1) + pow2(x2);\n if (t1 <= 1.0) {\n\t\tt2 = sqrt(-2.0 * log(t1) / t1);\n\t\tt3 = (x1 * t2);\t\t\t\t/* Xi */\n\t\tt4 = (x2 * t2);\t\t\t\t/* Yi */\n\t\tl = max(fabs(t3), fabs(t4));\n\t\tqq[l] += 1.0;\t\t\t\t/* counts */\n\t\tsx = sx + t3;\t\t\t\t/* sum of Xi */\n\t\tsy = sy + t4;\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n } #pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "uble B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n if (n >= 1) {\n for (c1 = 0; c1 <= n + -1; c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= n + -1; c2++) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ") {\n #pragma omp parallel for private(c1, c2)\n for (c1 = 0; c1 <= n + -1; c1++) {\n for (c2 = 0; c2 <= n + -1; c2++) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c8)", "context_chars": 100, "text": ";\n \n //#pragma scop\n{\n int c0;\n int c2;\n int c8;\n for (c0 = 0; c0 <= 9; c0++) {\n for (c2 = 0; c2 <= 499; c2++) {\n for (c8 = 1; c8 <= 499; c8++) {\n B[c2][c8] = B[c2][c8] - A[c2][c8] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c2][c8] = X[c2][c8] - X[c2][c8 - 1] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[c2][500 - c8 - 2] = (X[c2][500 - 2 - c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n } #pragma omp parallel for private(c2, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n }\n for (c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c8)", "context_chars": 100, "text": "(c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n }\n for (c2 = 0; c2 <= 499; c2++) {\n for 
(c8 = 1; c8 <= 499; c8++) {\n B[c8][c2] = B[c8][c2] - A[c8][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c8][c2] = X[c8][c2] - X[c8 - 1][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[500 - 2 - c8][c2] = (X[500 - 2 - c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n } #pragma omp parallel for private(c2, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n }\n for (c2 = 0; c2 <= 499; c2++) {\n X[500 - 1][c2] = X[500 - 1][c2] / B[500 - 1][c2];\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int argc, char* argv[])\n{\n int i,j;\n float temp, sum=0.0;\n int len=100;\n\n float u[100][100];\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\n int len=100;\n\n float u[100][100];\n\n #pragma omp parallel for\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(temp) reduction(+:sum)", "context_chars": 100, "text": "< len; i++)\n #pragma omp parallel for\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(temp) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(temp) reduction(+:sum)", "context_chars": 100, "text": " = 0.5; \n\n #pragma omp parallel for private(temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(temp) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp 
parallel for", "context_chars": 100, "text": " outLen = 0;\n\n if (argc>1)\n inLen= atoi(argv[1]);\n\n int input[inLen]; \n int output[inLen];\n\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for linear(outLen)", "context_chars": 100, "text": " \n int output[inLen];\n\n #pragma omp parallel for\n for (i=0; ifor (i=0; i #pragma omp parallel for linear(outLen)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int numNodes=len, numNodes2=0; \n int x[len]; \n\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(-:numNodes2)", "context_chars": 100, "text": "llel for\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\n for (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for reduction(-:numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " float temp, sum=0.0;\n int len=100;\n if (argc>1)\n len = atoi(argv[1]);\n float u[len][len];\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n = atoi(argv[1]);\n float u[len][len];\n\n #pragma omp parallel for\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp) reduction(+:sum)", "context_chars": 100, "text": " i < len; i++)\n #pragma omp parallel for\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp) reduction(+:sum)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp) reduction(+:sum)", "context_chars": 100, "text": " = 0.5;\n\n #pragma omp parallel for private (temp) reduction(+:sum)\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c3, c4, c2)", "context_chars": 100, "text": ")\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n int c4;\n int c3;\n if (n >= 1) {\n for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n #pragma omp parallel for private(c2, c3, c4)\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for private(c3, c4)\n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c1, c3, c4, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c3, c4)", "context_chars": 100, "text": " 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for private(c3, c4)\n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n }\n } #pragma omp parallel for private(c2, c3, c4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c3, c4)", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c3, c4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c4)", "context_chars": 100, "text": " \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 
+ 15 : n + -1)); c3++) {\n for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n } #pragma omp parallel for private(c4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c5, c4, c2, c3)", "context_chars": 100, "text": "16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {\nfor (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {\n for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + 
n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {\n if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {\n if ((n + 1) % 2 == 0) {\n for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {\n A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];\n }\n }\n }\n if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {\n if ((n + 1) % 2 == 0) {\n for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {\n A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];\n }\n }\n }\n for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * 
c1 + 15)); c3++) {\n if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);\n }\n }\n for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {\n if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);\n }\n for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);\n A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];\n }\n if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];\n }\n }\n if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {\n A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];\n }\n }\n }\n if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {\n for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {\n B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);\n }\n }\n if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {\n for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {\n B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);\n }\n }\n }\n } #pragma omp parallel for private(c1, c5, c4, c2, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "t main(int argc,char 
*argv[])\n{\n int i;\n int j;\n double a[20][20];\n memset(a,0,(sizeof(a)));\n\n for (i = 0; i < 20; i += 1) {\n #pragma omp parallel for\n for (j = 0; j < 20; j += 1) {\n a[i][j] += i + j + 0.1;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "20][20];\n memset(a,0,(sizeof(a)));\n\n #pragma omp parallel for\n for (i = 0; i < 20; i += 1) {\n for (j = 0; j < 20; j += 1) {\n a[i][j] += i + j + 0.1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "j < 20; j += 1) {\n a[i][j] += i + j + 0.1;\n }\n }\n \n for (i = 0; i < 20 -1; i += 1) {\n for (j = 0; j < 20; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB112-linear-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n*/\n#include \nint main()\n{\n int len=100;\n double a[len], b[len], c[len];\n int i,j=0;\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB112-linear-orig-no.c", "omp_pragma_line": "#pragma omp parallel for linear(j)", "context_chars": 100, "text": "en;i++)\n {\n a[i]=((double)i)/2.0; \n b[i]=((double)i)/3.0; \n c[i]=((double)i)/7.0; \n }\n\n for (i=0;i #pragma omp parallel for linear(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB109-orderedmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x)", "context_chars": 100, "text": " * Missing the ordered clause\n * Data race pair: x@56:5 vs. 
x@56:5\n * */\nint main()\n{\n int x =0;\n for (int i = 0; i < 100; ++i) {\n x++;\n } #pragma omp parallel for reduction(+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB110-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x)", "context_chars": 100, "text": "niv.\n * Proper user of ordered directive and clause, no data races\n * */\nint main()\n{\n int x =0;\n for (int i = 0; i < 100; ++i) {\n x++;\n } #pragma omp parallel for reduction(+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB101-task-value-orig-no.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "#define MYLEN 100\nint a[MYLEN];\n\nvoid gen_task(int i)\n{\n a[i]= i+1;\n}\n\nint main()\n{\n int i=0;\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j )", "context_chars": 100, "text": "lelized so no race condition.\n*/\nint n=100, m=100;\ndouble b[100][100];\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < n; i++) {\n #pragma omp parallel for private(j )\n for (j = 0; j < m; j++) {\n b[i][j] = i * j;\n }\n } #pragma omp parallel for private(i ,j )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j )", "context_chars": 100, "text": "int init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j )\n for (i = 0; i < n; i++) {\n for (j = 0; j < m; j++) {\n b[i][j] = i * j;\n } #pragma omp parallel for private(j )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j )", "context_chars": 100, "text": "j < m; j++) {\n b[i][j] = i * j;\n }\n }\n return 0;\n}\n\nvoid foo(int n, int m)\n{\n int i,j;\n for (i=0;i #pragma omp parallel for private(i ,j )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB059-lastprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for lastprivate (x)", "context_chars": 100, "text": " back to the shared one within the last iteration.\n*/\n#include \n\nvoid foo()\n{\n int i,x;\n for (i=0;i<100;i++)\n x=i;\n printf(\"x=%d\",x);\n}\n\nint main()\n{\n foo();\n return 0;\n} #pragma omp parallel for lastprivate (x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c4, c2, c3)", "context_chars": 100, 
"text": ")\n{\n //int i;\n //int j;\n{\n int c1;\n int c3;\n int c2;\n int c4;\n if (n >= 1) {\n for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n #pragma omp parallel for private(c2, c4, c3)\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for private(c3, c4)\n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c1, c4, c2, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c4, c3)", "context_chars": 100, "text": " 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for private(c3, c4)\n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n }\n } #pragma omp parallel for private(c2, c4, c3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c3, c4)", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n #pragma omp parallel for private(c4)\n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c3, c4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c4)", "context_chars": 100, "text": " \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n } #pragma omp parallel for private(c4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c15, c9, c8)", "context_chars": 100, "text": "= 1 && tsteps >= 1) {\n for (c0 = 0; c0 <= tsteps + -1; c0++) {\n 
if (n >= 2) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];\n }\n }\n }\n } #pragma omp parallel for private(c2, c15, c9, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c15)", "context_chars": 100, "text": "/ B[c15][n - 3 - c9];\n }\n }\n }\n }\n }\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n } #pragma omp parallel for private(c2, c15)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c15, c9, c8)", "context_chars": 100, "text": "[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n }\n if (n >= 2) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 
0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];\n }\n }\n }\n } #pragma omp parallel for private(c2, c15, c9, c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c15)", "context_chars": 100, "text": "/ B[n - 2 - c9][c15];\n }\n }\n }\n }\n }\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];\n }\n } #pragma omp parallel for private(c2, c15)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB067-restrictpointer1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "double real8;\n\nvoid foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)\n{\n int i;\n\n for (i = 0; i <= length - 1; i += 1) {\n newSxx[i] = 0.0;\n newSyy[i] = 0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB024-simdtruedep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "clude \nint main(int argc, char* argv[])\n{\n int i;\n int len=100;\n int a[100], b[100];\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:sum1)", "context_chars": 100, "text": "for (i=0;i #pragma omp parallel for reduction (+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "vs. 
Read_set is a data race pair.\n*/\n#include \nint a[100][100];\nint main()\n{\n int i, j;\n for (i = 0; i < 100; i++)\n #pragma omp parallel for\n for (j = 0; j < 100; j++)\n {\n a[i][j] = i + j;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt a[100][100];\nint main()\n{\n int i, j;\n #pragma omp parallel for\n for (i = 0; i < 100; i++)\n for (j = 0; j < 100; j++)\n {\n a[i][j] = i + j;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt input[1000]; \nint output[1000];\n\nint main()\n{\n int i ;\n int inLen=1000 ; \n int outLen = 0;\n\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n = 2560;\n double sum =0.0, sum2=0.0;\n double a[len], b[len];\n /*Initialize with some values*/\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB084-threadprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum1)", "context_chars": 100, "text": "=0;\n for (i=1;i<=1000;i++)\n {\n foo (i);\n } \n sum= sum+sum0;\n/* reference calculation */\n for (i=1;i<=1000;i++)\n {\n sum1=sum1+i;\n } #pragma omp parallel for reduction(+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ain(int argc, char* argv[])\n{\n int i;\n int ret;\n FILE* pfile;\n int len=1000;\n\n int A[1000];\n\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " map + array sections derived from pointers\n*/\nvoid foo (double* a, double* b, int N)\n{\n int i; \n for (i=0;i< N ;i++)\n b[i]=a[i]*(double)i;\n}\n\nint main(int argc, char* argv[])\n{\n int i;\n int len = 1000;\n double a[len], b[len];\n\n #pragma omp parallel for\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ble)i;\n}\n\nint main(int argc, char* argv[])\n{\n int i;\n int len = 1000;\n double a[len], b[len];\n\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB066-pointernoaliasing-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* ) malloc (sizeof (double) * N 
);\n double * m_nvol = (double* ) malloc (sizeof (double) * N );\n\n for (int i=0; i < N; ++i ) \n { \n m_pdv_sum[ i ] = 0.0;\n m_nvol[ i ] = i*2.5;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum1)", "context_chars": 100, "text": "=1000;i++)\n {\n sum0=sum0+i;\n } \n }\n sum= sum+sum0;\n\n /* reference calculation */\n for (i=1;i<=1000;i++)\n {\n sum1=sum1+i;\n } #pragma omp parallel for reduction(+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j )", "context_chars": 100, "text": "l loop parallelization\n*/\n#define N 100\n\ndouble a[N][N],v[N],v_out[N];\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j )\n for (j = 0; j < N; j++) {\n a[i][j] = i * j + 0.01;\n }\n v_out[i] = i * j + 0.01;\n v[i] = i * j + 0.01;\n } #pragma omp parallel for private(i ,j )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j )", "context_chars": 100, "text": "int init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j )\n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n a[i][j] = i * j + 0.01;\n } #pragma omp parallel for private(j )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "t[i] = i * j + 0.01;\n v[i] = i * j + 0.01;\n }\n return 0;\n}\n\nint mv()\n{ \n int i,j;\n for (i = 0; i < N; i++)\n { \n double sum = 0.0;\n #pragma omp parallel for reduction(+:sum)\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } \n v_out[i] = sum;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "gma omp parallel for private (i,j)\n for (i = 0; i < N; i++)\n { \n double sum = 0.0;\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ed(n) is an OpenMP 4.5 addition. 
\n*/\n#include \nint a[100][100];\nint main()\n{\n int i, j;\n for (i = 0; i < 100; i++)\n #pragma omp parallel for\n for (j = 0; j < 100; j++)\n {\n a[i][j] = i + j;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt a[100][100];\nint main()\n{\n int i, j;\n #pragma omp parallel for\n for (i = 0; i < 100; i++)\n for (j = 0; j < 100; j++)\n {\n a[i][j] = i + j;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "ouble B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (n >= 1) {\n for (c1 = 0; c1 <= n + -1; c1++) {\n\t#pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= n + -1; c2++) {\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " (n >= 1) {\n #pragma omp parallel for private(c1, c2)\n for (c1 = 0; c1 <= n + -1; c1++) {\n\tfor (c2 = 0; c2 <= n + -1; c2++) {\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "[(-2 * c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);\n }\n }\n }\nfor (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {\n B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);\n for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {\n A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];\n B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);\n }\n A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];\n } #pragma omp parallel for private(c1, c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "-2 * c0 + 3 * c1 + -1][498];\n }\n if (c0 >= 499) {\n if ((2 * c0 + 1) % 3 == 0) {\n\t for (c2 = ((2 * c0 + -992) * 3 < 0?-(-(2 * c0 + -992) / 3) : ((3 < 0?(-(2 * c0 + -992) + - 3 - 1) / - 3 : (2 * c0 + -992 + 3 - 1) / 3))); c2 <= (((2 * c0 + 499) * 3 < 0?((3 < 0?-((-(2 * c0 + 499) + 3 + 1) / 3) : -((-(2 * c0 + 499) + 3 - 1) / 3))) : (2 * c0 + 499) / 3)); c2++) {\n A[498][(-2 * c0 + 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];\n }\n }\n }\n }\n for (c2 = 20; c2 <= 517; c2++) {\n A[498][c2 + -19] = B[498][c2 + -19];\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pi) private(x)", "context_chars": 100, "text": "le pi = 0.0;\n long int i;\n double x, interval_width;\n interval_width = 1.0/(double)num_steps;\n\n for (i = 0; i < num_steps; i++) {\n x = (i+ 0.5) * interval_width;\n pi += 1.0 / (x*x + 1.0);\n } #pragma omp parallel for reduction(+:pi) private(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "rgv[])\n{\n int i;\n int tmp;\n int len=100;\n if (argc>1)\n len = atoi(argv[1]);\n int a[len];\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(tmp)", "context_chars": 100, "text": "len = atoi(argv[1]);\n int a[len];\n\n #pragma omp parallel for\n for (i=0;ifor (i=0;i #pragma omp parallel for private(tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\nArrays passed as function parameters\n*/\nvoid foo1(double o1[], double c[], int len)\n{ \n int i ;\n for (i = 0; i < len; ++i) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "= 0.5 * c[i];\n o1[i] = volnew_o8;\n } \n}\n\ndouble o1[100];\ndouble c[100];\nint main()\n{\n int i;\n for (i = 0; i < 100; ++i) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "vate. \n*/\nint main()\n{\n int len=100;\n double a[len][len], b[len][len], c[len][len];\n int i,j;\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n][len], b[len][len], c[len][len];\n int i,j;\n\n #pragma omp parallel for\n for (i=0;ifor (j=0;j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "t(shared) to cover another option.\n*/\n\nint a[100][100];\nint b[100][100];\nint main()\n{\n int i,j;\n\n for (i=0;i<100;i++) {\n #pragma omp parallel for private(j ) \n for (j=0;j<100;j++) {\n a[i][j] = i;\n b[i][j] = i;\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "0];\nint main()\n{\n int i,j;\n\n #pragma omp parallel for private(i ,j ) \n for (i=0;i<100;i++) {\n for (j=0;j<100;j++) {\n a[i][j] = i;\n b[i][j] = i;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": " xx, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n/* Initialize initial condition and RHS */\n//#pragma omp parallel for private(xx ,yy) \n for (i = 0; i < n; i++)\n #pragma omp parallel for private(xx ,yy) \n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(xx ,yy) ", "context_chars": 100, "text": "m - 1);\n\n/* Initialize initial condition and RHS */\n//#pragma omp parallel for private(i,j,xx,yy)\n for (i = 0; i < n; i++)\n #pragma omp parallel for private(xx ,yy) \n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < 
y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(xx ,yy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(xx ,yy) ", "context_chars": 100, "text": "el for private(i,j,xx,yy)\n #pragma omp parallel for private(xx ,yy) \n for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(xx ,yy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "l;\n k = 1;\n\n while (k <= mits)\n {\n error = 0.0;\n\n/* Copy new solution into old */\n for (i = 0; i < n; i++)\n\t #pragma omp parallel for\n for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n #pragma omp parallel for private(resid) reduction(+:error)\n for (i = 1; i < (n - 1); i++)\n\t #pragma omp parallel for private(resid) reduction(+:error)\n for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "* Copy new solution into old */\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n\t for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n #pragma omp parallel for private(resid) reduction(+:error)\n for (i = 1; i < (n - 1); i++)\n\t #pragma omp parallel for private(resid) reduction(+:error)\n for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(resid) reduction(+:error)", "context_chars": 100, "text": "ragma omp parallel for\n for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n for (i = 1; i < (n - 1); i++)\n\t #pragma omp parallel for private(resid) reduction(+:error)\n for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private(resid) reduction(+:error)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(resid) 
reduction(+:error)", "context_chars": 100, "text": "#pragma omp parallel for private(resid) reduction(+:error)\n for (i = 1; i < (n - 1); i++)\n\t for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private(resid) reduction(+:error)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "y computation with a vetorization directive\n*/\nint a[100], b[100], c[100];\n\nint main()\n{\n int i;\n for (i=0;i<100;i++) {\n a[i]= i * 40;\n b[i] = i - 1;\n c[i] = i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "el for private(i ) \n for (i=0;i<100;i++) {\n a[i]= i * 40;\n b[i] = i - 1;\n c[i] = i;\n }\n\n for (i=0;i<100;i++)\n a[i]=b[i]*c[i];\n\n for (i=0;i<100;i++) {\n printf(\"%d %d %d\\n\", a[i], b[i], c[i]);\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "clude\n\nint main(int argc, char* argv[])\n{\n int i;\n int len=100;\n int a[len], b[len];\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "evel parallelization.\n*/\n#define N 1000\ndouble a[N][N],v[N],v_out[N];\n\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < N; j++) {\n a[i][j] = i * j + 0.01;\n }\n v_out[i] = i * j + 0.01;\n v[i] = i * j + 0.01;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n a[i][j] = i * j + 0.01;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "[i] = i * j + 0.01;\n v[i] = i * j + 0.01;\n }\n return 0;\n}\n\nvoid mv()\n{ \n int i,j;\n for (i = 0; i < N; i++)\n { \n double sum = 0.0;\n #pragma omp parallel for reduction(+:sum)\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } \n v_out[i] = sum;\n } #pragma omp parallel for private(i ,j ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum)", "context_chars": 100, "text": "a omp parallel for private(i ,j ) \n for (i = 0; i < N; i++)\n { \n double sum = 0.0;\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } #pragma omp parallel for reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " j@67:7 vs. j@68:5 \n*/\nint main()\n{\n int len=100;\n double a[len], b[len], c[len];\n int i,j=0;\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for linear(j)", "context_chars": 100, "text": "en;i++)\n {\n a[i]=((double)i)/2.0; \n b[i]=((double)i)/3.0; \n c[i]=((double)i)/7.0; \n }\n\n for (i=0;i #pragma omp parallel for linear(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "de \nint main(int argc, char* argv[])\n{\n int i;\n int tmp;\n int len=100;\n int a[100];\n\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(tmp)", "context_chars": 100, "text": " tmp;\n int len=100;\n int a[100];\n\n #pragma omp parallel for\n for (i=0;ifor (i=0;i #pragma omp parallel for private(tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " SUCH DAMAGE.\n*/\n\n\n/*\nExample use of firstprivate()\n*/\nvoid foo(int * a, int n, int g)\n{\n int i;\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "loop can be parallelized. 
\n \n*/\nint n=100, m=100;\ndouble b[100][100];\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < n; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < m; j++) {\n b[i][j] = i * j;\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < n; i++) {\n for (j = 0; j < m; j++) {\n b[i][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "for (j = 0; j < m; j++) {\n b[i][j] = i * j;\n }\n }\n return 0;\n}\n\nvoid foo()\n{\n int i,j;\n for (i=0;i #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "ble D[128 + 0][128 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (nl >= 1) {\n for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n 
#pragma omp parallel for private(c2)\n for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n\t#pragma omp parallel for private(c2)\n for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n\t#pragma omp parallel for private(c2)\n for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n\t#pragma omp parallel for private(c2)\n for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "(double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n\tfor (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n\tfor (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n\tfor (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for 
private(c2)", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\nfor (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n\t#pragma omp parallel for private(c2)\n for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "j + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n\tfor (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "m?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\n for (c1 = nm; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= 
nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\nfor (c1 = 0; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n\t#pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n\tfor (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / 
ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\n for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n\tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "l?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\n for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if 
(nj >= 1) {\n for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\n for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": 
"#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj <= 0 && nl <= 0) {\n for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0) {\n for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n\t#pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "te(c1, c2)\n for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n\tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp 
parallel for private(c2)\n for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for 
private(c2)\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) {\n for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm <= 0) {\n for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " private(c1, c2)\n for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1) {\n for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nl >= 1) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) 
{\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= 
nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "= nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\n for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n B[c1][c2] 
= ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "c2)\n for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nj; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 
: nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "= nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1) {\n for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": 
"1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "= nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": ")\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) 
{\n for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2)\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r private(c1, c2)\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "= nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl <= 0) {\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp 
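The DRB041-3mm initialization records above all follow one idiom: the outer row loop is parallelized and the inner column index is listed in private(...) so each thread keeps its own counter. A minimal, self-contained sketch of that pattern follows; the array name B, the 128x128 size, and the printf at the end are illustrative assumptions, not taken from the dataset records.

#include <stdio.h>

#define NI 128
#define NJ 128

static double B[NI][NJ];

int main(void)
{
    int c1, c2;

    /* Parallelize the row loop; c2 must be private so each thread
       iterates its own inner-loop counter (c1 is private by default
       as the parallel-for induction variable). */
    #pragma omp parallel for private(c2)
    for (c1 = 0; c1 < NI; c1++) {
        for (c2 = 0; c2 < NJ; c2++) {
            B[c1][c2] = ((double)c1) * (c2 + 1) / NJ;
        }
    }

    printf("%f\n", B[NI - 1][NJ - 1]);
    return 0;
}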
parallel for private(c2)\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "r private(c1, c2)\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nm >= 1) {\n for (c1 = nm; c1 <= nj + -1; c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "{\n #pragma omp parallel for private(c1, c2)\n for (c1 = nm; c1 <= nj + -1; c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "c2)\n for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", 
"context_chars": 100, "text": " for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\n for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {\n #pragma omp parallel for private(c2)\n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "ma omp parallel for private(c1, c2)\n for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "])\n{\n //int i;\n //int j;\n //int k;\n \n //#pragma scop\n{\n int c1;\n int c2;\n int c5;\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": "\n int c5;\n #pragma omp parallel for private(c1, c2)\n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c5, c2)", "context_chars": 100, "text": " for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2, c5)\n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n } #pragma omp parallel for private(c1, c5, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2, c5)", "context_chars": 100, "text": " }\n }\n #pragma omp parallel for private(c1, c5, c2)\n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n } #pragma omp parallel for private(c2, c5)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5)", "context_chars": 100, "text": "1++) {\n #pragma omp parallel for private(c2, c5)\n for (c2 = 0; c2 <= 127; c2++) {\n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n } #pragma omp parallel for private(c5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c2)", "context_chars": 100, "text": "5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2)\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c1, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2)", "context_chars": 100, "text": " }\n }\n #pragma omp parallel for private(c1, c2)\n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n } #pragma omp parallel for private(c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1, c5, c2)", "context_chars": 100, "text": "llel for private(c2)\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c5, c2)\n for (c2 = 0; c2 <= 127; c2++) {\n\t#pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n #pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n }\n }\n } #pragma omp parallel for private(c1, c5, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5, c2)", "context_chars": 100, "text": " }\n }\n #pragma omp parallel for private(c1, c5, c2)\n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n\t#pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n #pragma omp parallel for private(c5)\n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n }\n } #pragma omp parallel for private(c5, c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5)", "context_chars": 100, "text": " 127; c1++) {\n #pragma omp parallel for private(c5, c2)\n for (c2 = 0; c2 <= 127; c2++) {\n\tfor (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n } #pragma omp parallel for private(c5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5)", "context_chars": 100, "text": " for (c5 = 0; c5 <= 127; c5++) {\n 
E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n } #pragma omp parallel for private(c5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "]@75:13.\n*/\n\n#include \nint main()\n{\n int i,error;\n int len = 1000;\n int a[len], b=5;\n\n for (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e \n\nint main(int argc,char *argv[])\n{\n int i, j;\n int len = 20; \n\n double a[20][20];\n\n for (i=0; i< len; i++)\n #pragma omp parallel for\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", j;\n int len = 20; \n\n double a[20][20];\n\n #pragma omp parallel for\n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "allel for\n for (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "define K 100\ndouble a[N][M],b[M][K],c[N][K];\n \nint init() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n #pragma omp parallel for\n for (j = 0; j < M; j++) {\n a[i][j] = (double)i * j;\n b[i][j] = (double)i * j;\n c[i][j] = (double)i * j;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\nint init() \n{ \n int i,j,k;\n #pragma omp parallel for \n for (i = 0; i < N; i++) \n for (j = 0; j < M; j++) {\n a[i][j] = (double)i * j;\n b[i][j] = (double)i * j;\n c[i][j] = (double)i * j;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j,k)", "context_chars": 100, "text": "* j;\n c[i][j] = (double)i * j;\n }\n return 0; \n} \n\nint mmm() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n for (k = 0; k < K; 
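In DataRaceBench naming, the -yes suffix on records such as DRB003-antidep2-orig-yes marks kernels that do contain a race: a[i][j] += a[i + 1][j] reads row i+1 while another thread may be updating it, so putting the pragma on the i loop is unsafe, whereas the j loop carries no dependence. A small sketch contrasting the two placements; the 20x20 size and the initialization values are illustrative assumptions.

#include <stdio.h>

#define LEN 20

static double a[LEN][LEN];

int main(void)
{
    int i, j;

    for (i = 0; i < LEN; i++)
        for (j = 0; j < LEN; j++)
            a[i][j] = i + j;

    /* UNSAFE: iteration i reads row i+1, which iteration i+1 writes,
       so distributing the i loop across threads is a data race:
       #pragma omp parallel for private(j)   -- would race on rows */

    /* SAFE: the j loop has no loop-carried dependence, so the pragma
       can go on the inner loop instead. */
    for (i = 0; i < LEN - 1; i++) {
        #pragma omp parallel for
        for (j = 0; j < LEN; j++)
            a[i][j] += a[i + 1][j];
    }

    printf("%f\n", a[0][0]);
    return 0;
}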
k++) \n for (j = 0; j < M; j++)\n c[i][j]= c[i][j]+a[i][k]*b[k][j];\n return 0; \n} \n\nint print() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n for (j = 0; j < M; j++)\n printf(\"%lf %lf %lf\\n\", c[i][j],a[i][j],b[i][j]);\n return 0; \n} #pragma omp parallel for private(j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB085-threadprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum1)", "context_chars": 100, "text": " } \n#pragma omp critical\n {\n sum= sum+sum0;\n } \n } \n/* reference calculation */\n for (i=0;i #pragma omp parallel for reduction(+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n{\n int i;\n int len=100;\n\n int numNodes=len, numNodes2=0; \n int x[100]; \n\n // initialize x[]\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(-:numNodes2)", "context_chars": 100, "text": "llel for\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\n for (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for reduction(-:numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB025-simdtruedep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " argv[])\n{\n int i;\n int len=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len], b[len];\n for (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "xecution. 
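The DRB011-minusminus record above applies a reduction clause to a decremented counter: every x[i] <= 0 decrements numNodes2, and reduction(-:numNodes2) gives each thread a private copy that OpenMP combines after the loop. A minimal sketch of that counting idiom, with the same variable names as the record and an assumed length of 100:

#include <stdio.h>

#define LEN 100

int main(void)
{
    int i, x[LEN];
    int numNodes2 = 0;

    for (i = 0; i < LEN; i++)
        x[i] = (i % 2 == 0) ? 5 : -5;

    /* Each thread decrements its own private copy of numNodes2;
       the partial results are combined when the loop finishes. */
    #pragma omp parallel for reduction(-:numNodes2)
    for (i = LEN - 1; i > -1; --i) {
        if (x[i] <= 0)
            numNodes2--;
    }

    printf("%d\n", numNodes2);
    return 0;
}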
\\n\");\n return 1; \n }\n\n double * xa1 = base;\n double * xa2 = base + 12;\n int i;\n\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 12;\n int i;\n\n #pragma omp parallel for\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.0;\n }\n\n for (i =0; i< N; ++i) // this level of loop has no loop carried dependence\n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "e \n\n\nvoid init(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++) {\n a[i] = 1;\n b[i] = i;\n c[i] = i * i; \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " * i; \n }\n}\n\nvoid foo(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++)\n a[i] = b[i] + c[i]; \n}\n\nvoid print(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++) {\n printf(\"%d %d %d\\n\", a[i], b[i], c[i]);\n }\n} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": "x, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n /* Initialize initial condition and RHS */\n for (i = 0; i < n; i++)\n #pragma omp parallel for private(j,xx,yy)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j,xx,yy)", "context_chars": 100, "text": "al condition and RHS */\n #pragma omp parallel for private(i,j,xx,yy)\n for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "])\n{\n int i, j;\n int len = 20; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n double a[len][len];\n\n for (i=0; i< 
len; i++)\n #pragma omp parallel for\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "en = atoi(argv[1]);\n\n double a[len][len];\n\n #pragma omp parallel for\n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "allel for\n for (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/polybench.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp)", "context_chars": 100, "text": "zeof(double);\n double* flush = (double*) calloc (cs, sizeof(double));\n int i;\n double tmp = 0.0;\nfor (i = 0; i < cs; i++)\n tmp += flush[i];\n assert (tmp <= 10.0);\n free (flush);\n}\n\n\n#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER\nvoid polybench_linux_fifo_scheduler()\n{\n /* Use FIFO scheduler to limit OS interference. Program must be run\n as root, and this works only for Linux kernels. */\n struct sched_param schedParam;\n schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);\n sched_setscheduler (0, SCHED_FIFO, &schedParam);\n} #pragma omp parallel for reduction(+:tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "initialization. */\nstatic\nvoid init_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n for (i = 0; i < n; i++)\n #pragma omp parallel for\n for (j = 0; j < n; j++)\n C[i][j] = 42;\n}\n\n\n/* DCE code. Must scan the entire live-out data.\n Can be used also to check the correctness of the output. */\nstatic\nvoid print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n #pragma omp parallel for\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n }\n fprintf (stderr, \"\\n\");\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "YPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n for (j = 0; j < n; j++)\n C[i][j] = 42;\n}\n\n\n/* DCE code. Must scan the entire live-out data.\n Can be used also to check the correctness of the output. 
*/\nstatic\nvoid print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n #pragma omp parallel for\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n }\n fprintf (stderr, \"\\n\");\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f the output. */\nstatic\nvoid print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < n; i++)\n #pragma omp parallel for\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "PE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n #pragma omp parallel for\n for (i = 0; i < n; i++)\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d return. */\nstatic\nvoid kernel_template(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < _PB_N; i++)\n #pragma omp parallel for\n for (j = 0; j < _PB_N; j++)\n C[i][j] += 42;\n}\n\n\nint main(int argc, char** argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);\n\n /* Initialize array(s). */\n init_array (n, POLYBENCH_ARRAY(C));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_template (n, POLYBENCH_ARRAY(C));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(C);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "OLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n #pragma omp parallel for\n for (i = 0; i < _PB_N; i++)\n for (j = 0; j < _PB_N; j++)\n C[i][j] += 42;\n}\n\n\nint main(int argc, char** argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);\n\n /* Initialize array(s). */\n init_array (n, POLYBENCH_ARRAY(C));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_template (n, POLYBENCH_ARRAY(C));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. 
All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(C);\n\n return 0;\n} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/b+tree/kernel/kernel_cpu_2.c", "omp_pragma_line": "#pragma omp parallel for private(i ,thid ) ", "context_chars": 100, "text": "==================150\n\n\t// private thread IDs\n\tint thid;\n\tint bid;\n\n\t// process number of querries\n\tfor(bid = 0; bid < count; bid++){\n\n\t\t// process levels of the tree\n\t\tfor(i = 0; i < maxheight; i++){\n\n\t\t\t// process all leaves at each level\n\t\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t\tif((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// \"offset[bid]\" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[currKnode[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset[bid] = knodes[currKnode[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// \"offset_2[bid]\" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[lastKnode[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset_2[bid] = knodes[lastKnode[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// set for next tree level\n\t\t\tcurrKnode[bid] = offset[bid];\n\t\t\tlastKnode[bid] = offset_2[bid];\n\n\t\t}\n\n\t\t// process leaves\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t// Find the index of the starting record\n\t\t\tif(knodes[currKnode[bid]].keys[thid] == start[bid]){\n\t\t\t\trecstart[bid] = knodes[currKnode[bid]].indices[thid];\n\t\t\t}\n\n\t\t}\n\n\t\t// process leaves\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t// Find the index of the ending record\n\t\t\tif(knodes[lastKnode[bid]].keys[thid] == end[bid]){\n\t\t\t\treclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1;\n\t\t\t}\n\n\t\t}\n\n\t} #pragma omp parallel for private(i ,thid ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/b+tree/kernel/kernel_cpu.c", "omp_pragma_line": "#pragma omp parallel for private(i ,thid ) ", "context_chars": 100, "text": "==========150\n\n\t// private thread IDs\n\tint thid;\n\tint bid;\n\tint i;\n\n\t// process number of querries\n\tfor(bid = 0; bid < count; bid++){\n\n\t\t// process levels of the tree\n\t\tfor(i = 0; i < maxheight; i++){\n\n\t\t\t// process all leaves at each level\n\t\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t\t// if value is between the two keys\n\t\t\t\tif((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && 
(knodes[currKnode[bid]].keys[thid+1] > keys[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// \"offset[bid]\" calculated below that addresses knodes[] in the next iteration goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[offset[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset[bid] = knodes[offset[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// set for next tree level\n\t\t\tcurrKnode[bid] = offset[bid];\n\n\t\t}\n\n\t\t//At this point, we have a candidate leaf node which may contain\n\t\t//the target record. Check each key to hopefully find the record\n\t\t// process all leaves at each level\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\tif(knodes[currKnode[bid]].keys[thid] == keys[bid]){\n\t\t\t\tans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value;\n\t\t\t}\n\n\t\t}\n\n\t} #pragma omp parallel for private(i ,thid ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/hotspot3D/3D.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:err) ", "context_chars": 100, "text": "iter);\n\n}\n\nfloat accuracy(float *arr1, float *arr2, int len)\n{\n float err = 0.0; \n int i;\n for(i = 0; i < len; i++)\n {\n err += (arr1[i]-arr2[i]) * (arr1[i]-arr2[i]);\n } #pragma omp parallel for reduction(+:err) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/hotspot3D/3D.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "rintf(\"%d threads running\\n\", omp_get_num_threads());\n\n do {\n int z; \n for (z = 0; z < nz; z++) {\n int y;\n for (y = 0; y < ny; y++) {\n int x;\n for (x = 0; x < nx; x++) {\n int c, w, e, n, s, b, t;\n c = x + y * nx + z * nx * ny;\n w = (x == 0) ? c : c - 1;\n e = (x == nx-1) ? c : c + 1;\n n = (y == 0) ? c : c - nx;\n s = (y == ny-1) ? c : c + nx;\n b = (z == 0) ? c : c - nx * ny;\n t = (z == nz-1) ? 
c : c + nx * ny;\n tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]\n + cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b] + ct * tIn_t[t]+(dt/Cap) * pIn[c] + ct*amb_temp;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\t\t\t\t\t\t\t// updated row coordinates\n\t\tprivate[i].d_T = public.d_endoT;\t\t\t\t\t\t\t\t\t\t\t\t\t\t// templates\n\t}\n\n\tfor(i=public.endoPoints; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "lic.conv_cols;\t\t\t\t\t\t\t\t\t\t\t\t// number of elements\n\tpublic.conv_mem = sizeof(fp) * public.conv_elem;\n\n\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ublic.in2_pad_rows * public.in2_pad_cols;\n\tpublic.in2_pad_mem = sizeof(fp) * public.in2_pad_elem;\n\n\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "elem = public.tMask_rows * public.tMask_cols;\n\tpublic.tMask_mem = sizeof(fp) * public.tMask_elem;\n\n\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "if((public.mask_cols-1) % 2 > 0.5){\n\t\tpublic.mask_conv_joffset = public.mask_conv_joffset + 1;\n\t}\n\n\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "====================================================\n\n\t\tomp_set_num_threads(omp_num_threads);\n\t\t\n\n\t\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "=================================================================================================\n\n\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or u\nc-------------------------------------------------------------------*/\n\n int i, j, k, m;\n \n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) 
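The hotspot3D accuracy() record above accumulates a sum of squared differences under reduction(+:err). A compact sketch of that pattern follows; the sqrt(err / len) return value and the tiny arrays in main are assumptions about how the result might be consumed, not taken from the record.

#include <math.h>
#include <stdio.h>

float accuracy(const float *arr1, const float *arr2, int len)
{
    float err = 0.0f;
    int i;

    /* Each thread sums into a private err; the partial sums are
       added together when the loop ends. */
    #pragma omp parallel for reduction(+:err)
    for (i = 0; i < len; i++)
        err += (arr1[i] - arr2[i]) * (arr1[i] - arr2[i]);

    return (float)sqrt(err / len);
}

int main(void)
{
    float a[4] = {1, 2, 3, 4}, b[4] = {1, 2, 2, 4};
    printf("%f\n", accuracy(a, b, 4));
    return 0;
}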
{\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "*/\n\n int i, j, k, m;\n \n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m ) ", "context_chars": 100, "text": "-------------------------------*/\n\n int i, j, k, m, d;\n double xi, eta, zeta, u_exact[5], add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for private(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(add ,m) ", "context_chars": 100, "text": "0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\texact_solution(xi, eta, zeta, u_exact);\n\n\tfor (m = 0; m < 5; m++) {\n\t add = u[i][j][k][m] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add ,m) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(d ,m ) ", "context_chars": 100, "text": "; m++) {\n\t add = u[i][j][k][m] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for private(d ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m ) ", "context_chars": 100, "text": "----------------------------------------------------------*/\n\n int i, j, k, d, m;\n double add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for private(m ) "} {"filename": 
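The NPB BT add() records capture pragmas at several nesting levels of the u += rhs update. One common alternative, sketched below as an assumption rather than the benchmark's own choice, is a single pragma on the outermost loop with the inner counters declared private; the 12x12x12 grid size is also illustrative.

#define NX 12
#define NY 12
#define NZ 12

static double u[NX][NY][NZ][5], rhs[NX][NY][NZ][5];

void add(void)
{
    int i, j, k, m;

    /* One pragma on the outermost loop; j, k, m are private so each
       thread runs its own inner nest over its block of i values. */
    #pragma omp parallel for private(j, k, m)
    for (i = 1; i < NX - 1; i++)
        for (j = 1; j < NY - 1; j++)
            for (k = 1; k < NZ - 1; k++)
                for (m = 0; m < 5; m++)
                    u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
}

int main(void) { add(); return 0; }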
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(add, m) ", "context_chars": 100, "text": "i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[i][j][k][m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add, m) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(d ,m ) ", "context_chars": 100, "text": " = 0; m < 5; m++) {\n\t add = rhs[i][j][k][m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for private(d ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------*/\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "d_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "id_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)", "context_chars": 100, "text": "es \nc-------------------------------------------------------------------*/\n //for (j = 1; j < grid_points[1]-1; j++) {\n eta = (double)j * dnym1;\n \n for (k = 1; k < 
grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (i = 0; i < grid_points[0]; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\t#pragma omp parallel for\n\tfor (m = 1; m <= 4; m++) {\n\t buf[i][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[i] = buf[i][1] * buf[i][1];\n\tbuf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + \n\t buf[i][3] * buf[i][3];\n\tq[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +\n\t\t buf[i][3]*ue[i][3]);\n }\n \n #pragma omp parallel for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) \n for (i = 1; i < grid_points[0]-1; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tx2*(ue[ip1][1]-ue[im1][1])+\n\t dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-\n\t\t (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+\n\t xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+\n\t dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+\n\t xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+\n\t dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);\n \n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+\n\t xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+\n\t dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-\n\t buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+\n\t dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);\n\ti = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n }\n\n #pragma omp parallel for private(i) \n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m]);\n\ti = grid_points[0]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);\n }\n\n }\n } #pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i = 0; i < grid_points[0]; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, 
eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "a omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[i][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) ", "context_chars": 100, "text": "(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +\n\t\t buf[i][3]*ue[i][3]);\n }\n \n for (i = 1; i < grid_points[0]-1; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tx2*(ue[ip1][1]-ue[im1][1])+\n\t dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-\n\t\t (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+\n\t xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+\n\t dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+\n\t xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+\n\t dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);\n \n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+\n\t xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+\n\t dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-\n\t buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+\n\t dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);\n } #pragma omp parallel for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);\n\ti = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "- dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n } #pragma omp 
parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "1][m] + ue[i+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(i) ", "context_chars": 100, "text": "(ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m]);\n\ti = grid_points[0]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);\n } #pragma omp parallel for private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, zeta, eta) ", "context_chars": 100, "text": "differences \nc-------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (k = 1; k < grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (j = 0; j < grid_points[1]; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t}\n \n\tdtpp = 1.0/dtemp[0];\n\n\t#pragma omp parallel for\n\tfor (m = 1; m <= 4; m++) {\n\t buf[j][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[j] = buf[j][2] * buf[j][2];\n\tbuf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + \n\t buf[j][3] * buf[j][3];\n\tq[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +\n\t\t buf[j][3]*ue[j][3]);\n }\n\n #pragma omp parallel for private(j, jm1, jp1) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t ty2*( ue[jp1][2]-ue[jm1][2] )+\n\t dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+\n\t yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+\n\t dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-\n\t (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+\n\t yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+\n\t dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+\n\t yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+\n\t dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-\n\t buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+\n buf[jm1][0])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t 
yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+\n\t dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n #pragma omp parallel for private(j) \n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);\n\tj = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n }\n\n #pragma omp parallel for private(j) \n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m]);\n\tj = grid_points[1]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);\n }\n\n }\n } #pragma omp parallel for private(xi, zeta, eta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0; j < grid_points[1]; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "or\n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t}\n \n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[j][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j, jm1, jp1) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) ", "context_chars": 100, "text": "];\n\tq[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +\n\t\t buf[j][3]*ue[j][3]);\n }\n\n for (j = 1; j < grid_points[1]-1; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t ty2*( ue[jp1][2]-ue[jm1][2] )+\n\t dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+\n\t yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+\n\t dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-\n\t (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+\n\t yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+\n\t dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+\n\t yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+\n\t dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t 
ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-\n\t buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+\n buf[jm1][0])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+\n\t dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);\n } #pragma omp parallel for private(j, jm1, jp1) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);\n\tj = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n } #pragma omp parallel for private(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m] + ue[j+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j) ", "context_chars": 100, "text": "(ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m]);\n\tj = grid_points[1]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);\n } #pragma omp parallel for private(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, eta, zeta)", "context_chars": 100, "text": "es \nc-------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (j = 1; j < grid_points[1]-1; j++) {\n eta = (double)j * dnym1;\n\n for (k = 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] 
= dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\t#pragma omp parallel for \n\tfor (m = 1; m <= 4; m++) {\n\t buf[k][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[k] = buf[k][3] * buf[k][3];\n\tbuf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + \n\t buf[k][2] * buf[k][2];\n\tq[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +\n\t\t buf[k][3]*ue[k][3]);\n }\n\n #pragma omp parallel for private(k, km1, kp1) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tz2*( ue[kp1][3]-ue[km1][3] )+\n\t dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+\n\t zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+\n\t dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+\n\t zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+\n\t dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-\n\t\t (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+\n\t zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+\n\t dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-\n\t\t buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]\n +buf[km1][0])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+\n\t dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n #pragma omp parallel for private(k)\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);\n\tk = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n }\n\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m]);\n\tk = grid_points[2]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);\n }\n\n }\n } #pragma omp parallel for private(xi, eta, zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "= 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "a omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = 
dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[k][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(k, km1, kp1) ", "context_chars": 100, "text": "];\n\tq[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +\n\t\t buf[k][3]*ue[k][3]);\n }\n\n for (k = 1; k < grid_points[2]-1; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tz2*( ue[kp1][3]-ue[km1][3] )+\n\t dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+\n\t zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+\n\t dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+\n\t zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+\n\t dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-\n\t\t (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+\n\t zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+\n\t dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-\n\t\t buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]\n +buf[km1][0])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+\n\t dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);\n } #pragma omp parallel for private(k, km1, kp1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(k)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);\n\tk = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n } #pragma omp parallel for private(k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][m] + ue[k+2][m]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t} #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m]);\n\tk = grid_points[2]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " of the forcing function, \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------*/\n #pragma omp parallel for\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[0]-1; i++) {\n #pragma omp parallel for\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_points[1]-1; j++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "xi, eta, zeta \nc-------------------------------------------------------------------*/\n\n int m;\n\n for (m = 0; m < 5; m++) {\n dtemp[m] = ce[m][0] +\n xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7]\n\t\t\t\t\t+ xi*ce[m][10]))) +\n eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8]\n\t\t\t\t\t + eta*ce[m][11])))+\n zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + \n\t\t\t\t\t zeta*ce[m][12])));\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ing the whole thing here. \nc-------------------------------------------------------------------*/\n for (i = 0; i < IMAX; i++) {\n #pragma omp parallel for \n for (j = 0; j < IMAX; j++) {\n #pragma omp parallel for\n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------------------*/\n #pragma omp parallel for\n for (i = 0; i < IMAX; i++) {\n for (j = 0; j < IMAX; j++) {\n #pragma omp parallel for\n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (i = 0; i < IMAX; i++) {\n #pragma omp parallel for \n for (j = 0; j < IMAX; j++) {\n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " for (j = 0; j < IMAX; j++) {\n #pragma omp parallel for\n for (k = 0; k < IMAX; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n \n for (k = 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n \n\tfor (ix = 0; ix < 2; ix++) {\n\t exact_solution((double)ix, eta, zeta, \n &(Pface[ix][0][0]));\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(Pxi, Peta, Pzeta) ", "context_chars": 100, "text": " iz++) {\n\t exact_solution(xi, eta, (double)iz, \n &Pface[iz][2][0]);\n\t}\n\n\tfor (m = 0; m < 5; m++) {\n\t Pxi = xi * Pface[1][0][m] + \n\t (1.0-xi) * Pface[0][0][m];\n\t Peta = eta * Pface[1][1][m] + \n\t (1.0-eta) * Pface[0][1][m];\n\t Pzeta = zeta * Pface[1][2][m] + \n\t (1.0-zeta) * Pface[0][2][m];\n \n\t u[i][j][k][m] = Pxi + Peta + Pzeta - \n\t Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + \n\t Pxi*Peta*Pzeta;\n\t} #pragma omp parallel for private(Pxi, Peta, Pzeta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "d_points[1]; j++) {\n eta = (double)j * dnym1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "eft hand side for starters\nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------*/\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "id_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for \n\t for (n = 0; n < 5; n++) 
{\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "rid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s overkill, but convenient\nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------*/\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "d_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "id_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3)", "context_chars": 100, "text": "tmp3)\n for (j 
= 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n for (i = 0; i < grid_points[0]; i++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n/*--------------------------------------------------------------------\nc \nc-------------------------------------------------------------------*/\n\tfjac[ i][ j][ k][0][0] = 0.0;\n\tfjac[ i][ j][ k][0][1] = 1.0;\n\tfjac[ i][ j][ k][0][2] = 0.0;\n\tfjac[ i][ j][ k][0][3] = 0.0;\n\tfjac[ i][ j][ k][0][4] = 0.0;\n\n\tfjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 * \n\t\t\t\t u[i][j][k][1])\n\t + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][1][1] = ( 2.0 - c2 )\n\t * ( u[i][j][k][1] / u[i][j][k][0] );\n\tfjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );\n\tfjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][1][4] = c2;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;\n\tfjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][2][3] = 0.0;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][2] = 0.0;\n\tfjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][4] = 0.0;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][1] * tmp1 );\n\tfjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 \n\t - 0.50 * c2\n\t * ( 3.0*u[i][j][k][1]*u[i][j][k][1]\n\t + u[i][j][k][2]*u[i][j][k][2]\n\t + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )\n\t * tmp2;\n\tfjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = con43 * c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = c3c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 ) * tmp1;\n\n } #pragma omp parallel for private(tmp1, tmp2, tmp3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2)", "context_chars": 100, "text": "nd 
side in x direction\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n\n\ttmp1 = dt * tx1;\n\ttmp2 = dt * tx2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]\n\t - tmp1 * njac[i-1][j][k][0][0]\n\t - tmp1 * dx1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]\n\t - tmp1 * njac[i-1][j][k][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]\n\t - tmp1 * njac[i-1][j][k][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]\n\t - tmp1 * njac[i-1][j][k][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]\n\t - tmp1 * njac[i-1][j][k][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]\n\t - tmp1 * njac[i-1][j][k][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]\n\t - tmp1 * njac[i-1][j][k][1][1]\n\t - tmp1 * dx2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]\n\t - tmp1 * njac[i-1][j][k][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]\n\t - tmp1 * njac[i-1][j][k][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]\n\t - tmp1 * njac[i-1][j][k][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]\n\t - tmp1 * njac[i-1][j][k][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]\n\t - tmp1 * njac[i-1][j][k][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]\n\t - tmp1 * njac[i-1][j][k][2][2]\n\t - tmp1 * dx3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]\n\t - tmp1 * njac[i-1][j][k][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]\n\t - tmp1 * njac[i-1][j][k][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]\n\t - tmp1 * njac[i-1][j][k][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]\n\t - tmp1 * njac[i-1][j][k][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]\n\t - tmp1 * njac[i-1][j][k][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]\n\t - tmp1 * njac[i-1][j][k][3][3]\n\t - tmp1 * dx4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]\n\t - tmp1 * njac[i-1][j][k][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]\n\t - tmp1 * njac[i-1][j][k][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]\n\t - tmp1 * njac[i-1][j][k][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]\n\t - tmp1 * njac[i-1][j][k][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]\n\t - tmp1 * njac[i-1][j][k][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]\n\t - tmp1 * njac[i-1][j][k][4][4]\n\t - tmp1 * dx5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dx1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dx2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * 
dx3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dx4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4]\n\t + tmp1 * 2.0 * dx5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]\n\t - tmp1 * njac[i+1][j][k][0][0]\n\t - tmp1 * dx1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]\n\t - tmp1 * njac[i+1][j][k][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]\n\t - tmp1 * njac[i+1][j][k][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]\n\t - tmp1 * njac[i+1][j][k][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]\n\t - tmp1 * njac[i+1][j][k][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]\n\t - tmp1 * njac[i+1][j][k][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]\n\t - tmp1 * njac[i+1][j][k][1][1]\n\t - tmp1 * dx2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]\n\t - tmp1 * njac[i+1][j][k][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]\n\t - tmp1 * njac[i+1][j][k][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]\n\t - tmp1 * njac[i+1][j][k][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]\n\t - tmp1 * njac[i+1][j][k][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]\n\t - tmp1 * njac[i+1][j][k][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]\n\t - tmp1 * njac[i+1][j][k][2][2]\n\t - tmp1 * dx3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]\n\t - tmp1 * njac[i+1][j][k][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]\n\t - tmp1 * njac[i+1][j][k][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]\n\t - tmp1 * njac[i+1][j][k][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]\n\t - tmp1 * njac[i+1][j][k][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]\n\t - tmp1 * njac[i+1][j][k][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]\n\t - tmp1 * njac[i+1][j][k][3][3]\n\t - tmp1 * dx4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]\n\t - tmp1 * njac[i+1][j][k][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]\n\t - tmp1 * njac[i+1][j][k][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]\n\t - tmp1 * njac[i+1][j][k][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]\n\t - tmp1 * njac[i+1][j][k][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]\n\t - tmp1 * njac[i+1][j][k][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]\n\t - tmp1 * njac[i+1][j][k][4][4]\n\t - tmp1 * dx5;\n\n } #pragma omp parallel for private(tmp1, tmp2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3)", "context_chars": 100, "text": ", tmp3)\n 
for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[ i][ j][ k][0][0] = 0.0;\n\tfjac[ i][ j][ k][0][1] = 0.0;\n\tfjac[ i][ j][ k][0][2] = 1.0;\n\tfjac[ i][ j][ k][0][3] = 0.0;\n\tfjac[ i][ j][ k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )\n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][3] = 0.0;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)\n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t * tmp2 );\n\tfjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][2][2] = ( 2.0 - c2 )\n\t * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][4] = c2;\n\n\tfjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][3][1] = 0.0;\n\tfjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][4] = 0.0;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * u[i][j][k][4] * tmp1 ) \n\t * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] \n\t * tmp2;\n\tfjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 \n\t - 0.50 * c2 \n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + 3.0 * u[i][j][k][2]*u[i][j][k][2]\n\t\t + u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1; \n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = con43 * c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = c3c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 ) * tmp1;\n\n } #pragma omp parallel for private(tmp1, tmp2, tmp3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2)", "context_chars": 100, "text": "t hand side in y direction\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) 
{\n #pragma omp parallel for private(tmp1, tmp2)\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * ty1;\n\ttmp2 = dt * ty2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]\n\t - tmp1 * njac[i][j-1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]\n\t - tmp1 * njac[i][j-1][k][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]\n\t - tmp1 * njac[i][j-1][k][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]\n\t - tmp1 * njac[i][j-1][k][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]\n\t - tmp1 * njac[i][j-1][k][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]\n\t - tmp1 * njac[i][j-1][k][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]\n\t - tmp1 * njac[i][j-1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]\n\t - tmp1 * njac[i][j-1][k][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]\n\t - tmp1 * njac[i][j-1][k][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]\n\t - tmp1 * njac[i][j-1][k][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]\n\t - tmp1 * njac[i][j-1][k][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]\n\t - tmp1 * njac[i][j-1][k][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]\n\t - tmp1 * njac[i][j-1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]\n\t - tmp1 * njac[i][j-1][k][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]\n\t - tmp1 * njac[i][j-1][k][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]\n\t - tmp1 * njac[i][j-1][k][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]\n\t - tmp1 * njac[i][j-1][k][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]\n\t - tmp1 * njac[i][j-1][k][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]\n\t - tmp1 * njac[i][j-1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]\n\t - tmp1 * njac[i][j-1][k][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]\n\t - tmp1 * njac[i][j-1][k][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]\n\t - tmp1 * njac[i][j-1][k][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]\n\t - tmp1 * njac[i][j-1][k][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]\n\t - tmp1 * njac[i][j-1][k][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]\n\t - tmp1 * njac[i][j-1][k][4][4]\n\t - tmp1 * dy5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dy1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dy2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * 
dy3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dy4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4] \n\t + tmp1 * 2.0 * dy5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]\n\t - tmp1 * njac[i][j+1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]\n\t - tmp1 * njac[i][j+1][k][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]\n\t - tmp1 * njac[i][j+1][k][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]\n\t - tmp1 * njac[i][j+1][k][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]\n\t - tmp1 * njac[i][j+1][k][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]\n\t - tmp1 * njac[i][j+1][k][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]\n\t - tmp1 * njac[i][j+1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]\n\t - tmp1 * njac[i][j+1][k][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]\n\t - tmp1 * njac[i][j+1][k][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]\n\t - tmp1 * njac[i][j+1][k][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]\n\t - tmp1 * njac[i][j+1][k][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]\n\t - tmp1 * njac[i][j+1][k][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]\n\t - tmp1 * njac[i][j+1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]\n\t - tmp1 * njac[i][j+1][k][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]\n\t - tmp1 * njac[i][j+1][k][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]\n\t - tmp1 * njac[i][j+1][k][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]\n\t - tmp1 * njac[i][j+1][k][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]\n\t - tmp1 * njac[i][j+1][k][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]\n\t - tmp1 * njac[i][j+1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]\n\t - tmp1 * njac[i][j+1][k][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]\n\t - tmp1 * njac[i][j+1][k][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]\n\t - tmp1 * njac[i][j+1][k][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]\n\t - tmp1 * njac[i][j+1][k][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]\n\t - tmp1 * njac[i][j+1][k][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]\n\t - tmp1 * njac[i][j+1][k][4][4]\n\t - tmp1 * dy5;\n\n }\n }\n } #pragma omp parallel for private(tmp1, tmp2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2)", "context_chars": 100, "text": 
"---*/\n #pragma omp parallel for private(tmp1, tmp2)\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * ty1;\n\ttmp2 = dt * ty2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]\n\t - tmp1 * njac[i][j-1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]\n\t - tmp1 * njac[i][j-1][k][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]\n\t - tmp1 * njac[i][j-1][k][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]\n\t - tmp1 * njac[i][j-1][k][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]\n\t - tmp1 * njac[i][j-1][k][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]\n\t - tmp1 * njac[i][j-1][k][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]\n\t - tmp1 * njac[i][j-1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]\n\t - tmp1 * njac[i][j-1][k][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]\n\t - tmp1 * njac[i][j-1][k][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]\n\t - tmp1 * njac[i][j-1][k][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]\n\t - tmp1 * njac[i][j-1][k][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]\n\t - tmp1 * njac[i][j-1][k][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]\n\t - tmp1 * njac[i][j-1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]\n\t - tmp1 * njac[i][j-1][k][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]\n\t - tmp1 * njac[i][j-1][k][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]\n\t - tmp1 * njac[i][j-1][k][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]\n\t - tmp1 * njac[i][j-1][k][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]\n\t - tmp1 * njac[i][j-1][k][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]\n\t - tmp1 * njac[i][j-1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]\n\t - tmp1 * njac[i][j-1][k][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]\n\t - tmp1 * njac[i][j-1][k][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]\n\t - tmp1 * njac[i][j-1][k][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]\n\t - tmp1 * njac[i][j-1][k][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]\n\t - tmp1 * njac[i][j-1][k][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]\n\t - tmp1 * njac[i][j-1][k][4][4]\n\t - tmp1 * dy5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dy1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dy2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 
* 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dy3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dy4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4] \n\t + tmp1 * 2.0 * dy5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]\n\t - tmp1 * njac[i][j+1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]\n\t - tmp1 * njac[i][j+1][k][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]\n\t - tmp1 * njac[i][j+1][k][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]\n\t - tmp1 * njac[i][j+1][k][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]\n\t - tmp1 * njac[i][j+1][k][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]\n\t - tmp1 * njac[i][j+1][k][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]\n\t - tmp1 * njac[i][j+1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]\n\t - tmp1 * njac[i][j+1][k][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]\n\t - tmp1 * njac[i][j+1][k][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]\n\t - tmp1 * njac[i][j+1][k][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]\n\t - tmp1 * njac[i][j+1][k][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]\n\t - tmp1 * njac[i][j+1][k][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]\n\t - tmp1 * njac[i][j+1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]\n\t - tmp1 * njac[i][j+1][k][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]\n\t - tmp1 * njac[i][j+1][k][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]\n\t - tmp1 * njac[i][j+1][k][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]\n\t - tmp1 * njac[i][j+1][k][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]\n\t - tmp1 * njac[i][j+1][k][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]\n\t - tmp1 * njac[i][j+1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]\n\t - tmp1 * njac[i][j+1][k][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]\n\t - tmp1 * njac[i][j+1][k][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]\n\t - tmp1 * njac[i][j+1][k][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]\n\t - tmp1 * njac[i][j+1][k][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]\n\t - tmp1 * njac[i][j+1][k][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]\n\t - tmp1 * njac[i][j+1][k][4][4]\n\t - tmp1 * dy5;\n\n }\n } #pragma omp parallel for private(tmp1, tmp2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, 
tmp2, tmp3) ", "context_chars": 100, "text": "led f) and s jacobians\nc---------------------------------------------------------------------*/\n //for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(tmp1, tmp2, tmp3)\n for (k = 0; k < grid_points[2]; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[i][j][k][0][0] = 0.0;\n\tfjac[i][j][k][0][1] = 0.0;\n\tfjac[i][j][k][0][2] = 0.0;\n\tfjac[i][j][k][0][3] = 1.0;\n\tfjac[i][j][k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) \n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][1][2] = 0.0;\n\tfjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][2][1] = 0.0;\n\tfjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) \n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );\n\tfjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][3] = ( 2.0 - c2 )\n\t * u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][4] = c2;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )\n\t - 0.50 * c2\n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + u[i][j][k][2]*u[i][j][k][2]\n\t\t + 3.0*u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 )* tmp1;\n\n }\n }\n } #pragma omp parallel for private(tmp1, tmp2, tmp3) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp 
parallel for private(tmp1, tmp2, tmp3)", "context_chars": 100, "text": "mp3) \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[i][j][k][0][0] = 0.0;\n\tfjac[i][j][k][0][1] = 0.0;\n\tfjac[i][j][k][0][2] = 0.0;\n\tfjac[i][j][k][0][3] = 1.0;\n\tfjac[i][j][k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) \n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][1][2] = 0.0;\n\tfjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][2][1] = 0.0;\n\tfjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) \n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );\n\tfjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][3] = ( 2.0 - c2 )\n\t * u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][4] = c2;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )\n\t - 0.50 * c2\n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + u[i][j][k][2]*u[i][j][k][2]\n\t\t + 3.0*u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 )* tmp1;\n\n } #pragma omp parallel for private(tmp1, tmp2, tmp3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2) ", "context_chars": 100, "text": "t hand side in z 
direction\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for private(tmp1, tmp2)\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * tz1;\n\ttmp2 = dt * tz2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]\n\t - tmp1 * njac[i][j][k-1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]\n\t - tmp1 * njac[i][j][k-1][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]\n\t - tmp1 * njac[i][j][k-1][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]\n\t - tmp1 * njac[i][j][k-1][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]\n\t - tmp1 * njac[i][j][k-1][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]\n\t - tmp1 * njac[i][j][k-1][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]\n\t - tmp1 * njac[i][j][k-1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]\n\t - tmp1 * njac[i][j][k-1][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]\n\t - tmp1 * njac[i][j][k-1][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]\n\t - tmp1 * njac[i][j][k-1][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]\n\t - tmp1 * njac[i][j][k-1][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]\n\t - tmp1 * njac[i][j][k-1][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]\n\t - tmp1 * njac[i][j][k-1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]\n\t - tmp1 * njac[i][j][k-1][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]\n\t - tmp1 * njac[i][j][k-1][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]\n\t - tmp1 * njac[i][j][k-1][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]\n\t - tmp1 * njac[i][j][k-1][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]\n\t - tmp1 * njac[i][j][k-1][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]\n\t - tmp1 * njac[i][j][k-1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]\n\t - tmp1 * njac[i][j][k-1][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]\n\t - tmp1 * njac[i][j][k-1][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]\n\t - tmp1 * njac[i][j][k-1][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]\n\t - tmp1 * njac[i][j][k-1][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]\n\t - tmp1 * njac[i][j][k-1][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]\n\t - tmp1 * njac[i][j][k-1][4][4]\n\t - tmp1 * dz5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dz1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dz2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = 
tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dz3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dz4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4]\n\t + tmp1 * 2.0 * dz5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]\n\t - tmp1 * njac[i][j][k+1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]\n\t - tmp1 * njac[i][j][k+1][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]\n\t - tmp1 * njac[i][j][k+1][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]\n\t - tmp1 * njac[i][j][k+1][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]\n\t - tmp1 * njac[i][j][k+1][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]\n\t - tmp1 * njac[i][j][k+1][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]\n\t - tmp1 * njac[i][j][k+1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]\n\t - tmp1 * njac[i][j][k+1][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]\n\t - tmp1 * njac[i][j][k+1][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]\n\t - tmp1 * njac[i][j][k+1][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]\n\t - tmp1 * njac[i][j][k+1][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]\n\t - tmp1 * njac[i][j][k+1][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]\n\t - tmp1 * njac[i][j][k+1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]\n\t - tmp1 * njac[i][j][k+1][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]\n\t - tmp1 * njac[i][j][k+1][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]\n\t - tmp1 * njac[i][j][k+1][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]\n\t - tmp1 * njac[i][j][k+1][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]\n\t - tmp1 * njac[i][j][k+1][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]\n\t - tmp1 * njac[i][j][k+1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]\n\t - tmp1 * njac[i][j][k+1][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]\n\t - tmp1 * njac[i][j][k+1][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]\n\t - tmp1 * njac[i][j][k+1][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]\n\t - tmp1 * njac[i][j][k+1][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]\n\t - tmp1 * njac[i][j][k+1][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]\n\t - tmp1 * njac[i][j][k+1][4][4]\n\t - tmp1 * dz5;\n\n }\n }\n } #pragma omp parallel for private(tmp1, tmp2) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2)", "context_chars": 100, "text": "--*/\n #pragma omp parallel for private(tmp1, tmp2) \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * tz1;\n\ttmp2 = dt * tz2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]\n\t - tmp1 * njac[i][j][k-1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]\n\t - tmp1 * njac[i][j][k-1][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]\n\t - tmp1 * njac[i][j][k-1][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]\n\t - tmp1 * njac[i][j][k-1][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]\n\t - tmp1 * njac[i][j][k-1][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]\n\t - tmp1 * njac[i][j][k-1][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]\n\t - tmp1 * njac[i][j][k-1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]\n\t - tmp1 * njac[i][j][k-1][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]\n\t - tmp1 * njac[i][j][k-1][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]\n\t - tmp1 * njac[i][j][k-1][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]\n\t - tmp1 * njac[i][j][k-1][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]\n\t - tmp1 * njac[i][j][k-1][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]\n\t - tmp1 * njac[i][j][k-1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]\n\t - tmp1 * njac[i][j][k-1][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]\n\t - tmp1 * njac[i][j][k-1][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]\n\t - tmp1 * njac[i][j][k-1][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]\n\t - tmp1 * njac[i][j][k-1][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]\n\t - tmp1 * njac[i][j][k-1][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]\n\t - tmp1 * njac[i][j][k-1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]\n\t - tmp1 * njac[i][j][k-1][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]\n\t - tmp1 * njac[i][j][k-1][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]\n\t - tmp1 * njac[i][j][k-1][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]\n\t - tmp1 * njac[i][j][k-1][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]\n\t - tmp1 * njac[i][j][k-1][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]\n\t - tmp1 * njac[i][j][k-1][4][4]\n\t - tmp1 * dz5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dz1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dz2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * 
njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dz3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dz4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4]\n\t + tmp1 * 2.0 * dz5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]\n\t - tmp1 * njac[i][j][k+1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]\n\t - tmp1 * njac[i][j][k+1][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]\n\t - tmp1 * njac[i][j][k+1][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]\n\t - tmp1 * njac[i][j][k+1][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]\n\t - tmp1 * njac[i][j][k+1][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]\n\t - tmp1 * njac[i][j][k+1][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]\n\t - tmp1 * njac[i][j][k+1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]\n\t - tmp1 * njac[i][j][k+1][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]\n\t - tmp1 * njac[i][j][k+1][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]\n\t - tmp1 * njac[i][j][k+1][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]\n\t - tmp1 * njac[i][j][k+1][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]\n\t - tmp1 * njac[i][j][k+1][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]\n\t - tmp1 * njac[i][j][k+1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]\n\t - tmp1 * njac[i][j][k+1][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]\n\t - tmp1 * njac[i][j][k+1][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]\n\t - tmp1 * njac[i][j][k+1][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]\n\t - tmp1 * njac[i][j][k+1][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]\n\t - tmp1 * njac[i][j][k+1][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]\n\t - tmp1 * njac[i][j][k+1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]\n\t - tmp1 * njac[i][j][k+1][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]\n\t - tmp1 * njac[i][j][k+1][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]\n\t - tmp1 * njac[i][j][k+1][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]\n\t - tmp1 * njac[i][j][k+1][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]\n\t - tmp1 * njac[i][j][k+1][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]\n\t - tmp1 * njac[i][j][k+1][4][4]\n\t - tmp1 * dz5;\n\n }\n } 
#pragma omp parallel for private(tmp1, tmp2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv)", "context_chars": 100, "text": " and the speed of sound.\nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for private(rho_inv)\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for private(rho_inv)\n for (k = 0; k < grid_points[2]; k++) {\n\trho_inv = 1.0/u[i][j][k][0];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[i][j][k][1] * rho_inv;\n\tvs[i][j][k] = u[i][j][k][2] * rho_inv;\n\tws[i][j][k] = u[i][j][k][3] * rho_inv;\n\tsquare[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + \n\t\t\t\t u[i][j][k][2]*u[i][j][k][2] +\n\t\t\t\t u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n }\n }\n } #pragma omp parallel for private(rho_inv)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv)", "context_chars": 100, "text": "---------*/\n #pragma omp parallel for private(rho_inv)\n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for private(rho_inv)\n for (k = 0; k < grid_points[2]; k++) {\n\trho_inv = 1.0/u[i][j][k][0];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[i][j][k][1] * rho_inv;\n\tvs[i][j][k] = u[i][j][k][2] * rho_inv;\n\tws[i][j][k] = u[i][j][k][3] * rho_inv;\n\tsquare[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + \n\t\t\t\t u[i][j][k][2]*u[i][j][k][2] +\n\t\t\t\t u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n }\n } #pragma omp parallel for private(rho_inv)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv)", "context_chars": 100, "text": "++) {\n #pragma omp parallel for private(rho_inv)\n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\trho_inv = 1.0/u[i][j][k][0];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[i][j][k][1] * rho_inv;\n\tvs[i][j][k] = u[i][j][k][2] * rho_inv;\n\tws[i][j][k] = u[i][j][k][3] * rho_inv;\n\tsquare[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + \n\t\t\t\t u[i][j][k][2]*u[i][j][k][2] +\n\t\t\t\t u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n } #pragma omp parallel for private(rho_inv)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "undary \nc-------------------------------------------------------------------*/\n\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp 
parallel for ", "context_chars": 100, "text": "------------------------*/\n\n #pragma omp parallel for\n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "d_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "id_points[1]; j++) {\n #pragma omp parallel for \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1) ", "context_chars": 100, "text": "mpute xi-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (k = 1; k < grid_points[2]-1; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * \n\t (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + \n\t u[i-1][j][k][0]) -\n\t tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);\n\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * \n\t (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + \n\t u[i-1][j][k][1]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[i+1][j][k][1]*up1 - \n\t\t u[i-1][j][k][1]*um1 +\n\t\t (u[i+1][j][k][4]- square[i+1][j][k]-\n\t\t u[i-1][j][k][4]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * \n\t (u[i+1][j][k][2] - 2.0*u[i][j][k][2] +\n\t u[i-1][j][k][2]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][2]*up1 - \n\t\t u[i-1][j][k][2]*um1);\n\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * \n\t (u[i+1][j][k][3] - 2.0*u[i][j][k][3] +\n\t u[i-1][j][k][3]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][3]*up1 - \n\t\t u[i-1][j][k][3]*um1);\n\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * \n\t (u[i+1][j][k][4] - 2.0*u[i][j][k][4] +\n\t u[i-1][j][k][4]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i-1][j][k][4]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[i+1][j][k][4] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[i-1][j][k][4] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n }\n } #pragma omp parallel for private(uijk, up1, um1) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1)", "context_chars": 100, "text": "\n #pragma omp parallel for private(uijk, up1, um1) \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (k = 1; k < grid_points[2]-1; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * \n\t (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + \n\t u[i-1][j][k][0]) -\n\t tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);\n\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * \n\t (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + \n\t u[i-1][j][k][1]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[i+1][j][k][1]*up1 - \n\t\t u[i-1][j][k][1]*um1 +\n\t\t (u[i+1][j][k][4]- square[i+1][j][k]-\n\t\t u[i-1][j][k][4]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * \n\t (u[i+1][j][k][2] - 2.0*u[i][j][k][2] +\n\t u[i-1][j][k][2]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][2]*up1 - \n\t\t u[i-1][j][k][2]*um1);\n\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * \n\t (u[i+1][j][k][3] - 2.0*u[i][j][k][3] +\n\t u[i-1][j][k][3]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][3]*up1 - \n\t\t u[i-1][j][k][3]*um1);\n\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * \n\t (u[i+1][j][k][4] - 2.0*u[i][j][k][4] +\n\t u[i-1][j][k][4]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i-1][j][k][4]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[i+1][j][k][4] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[i-1][j][k][4] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n } #pragma omp parallel for private(uijk, up1, um1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1)", "context_chars": 100, "text": "#pragma omp parallel for private(uijk, up1, um1)\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * \n\t (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + \n\t u[i-1][j][k][0]) -\n\t tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);\n\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * \n\t (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + \n\t u[i-1][j][k][1]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[i+1][j][k][1]*up1 - \n\t\t u[i-1][j][k][1]*um1 +\n\t\t (u[i+1][j][k][4]- square[i+1][j][k]-\n\t\t u[i-1][j][k][4]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * \n\t (u[i+1][j][k][2] - 2.0*u[i][j][k][2] +\n\t u[i-1][j][k][2]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][2]*up1 - \n\t\t u[i-1][j][k][2]*um1);\n\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * \n\t (u[i+1][j][k][3] - 2.0*u[i][j][k][3] +\n\t u[i-1][j][k][3]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][3]*up1 - \n\t\t 
u[i-1][j][k][3]*um1);\n\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * \n\t (u[i+1][j][k][4] - 2.0*u[i][j][k][4] +\n\t u[i-1][j][k][4]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i-1][j][k][4]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[i+1][j][k][4] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[i-1][j][k][4] - \n\t\t c2*square[i-1][j][k])*um1 );\n } #pragma omp parallel for private(uijk, up1, um1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "on \nc-------------------------------------------------------------------*/\n i = 1;\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "--------------*/\n i = 1;\n #pragma omp parallel for\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[1]-1; j++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n }\n }\n }\n\n i = 2;\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n i = 2;\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n }\n } #pragma omp parallel for "} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n }\n }\n }\n\n for (i = 3; i < grid_points[0]-3; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n #pragma omp parallel for \n for (i = 3; i < grid_points[0]-3; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-3; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t}\n }\n }\n }\n \n i = grid_points[0]-3;\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < 
grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n i = grid_points[0]-3;\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n }\n }\n }\n\n i = grid_points[0]-2;\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +\n\t 5.0*u[i][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n i = grid_points[0]-2;\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +\n\t 5.0*u[i][j][k][m] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +\n\t 5.0*u[i][j][k][m] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1)", "context_chars": 100, "text": "pute eta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for private(vijk, vp1, vm1)\n for (j = 1; j < 
grid_points[1]-1; j++) {\n #pragma omp parallel for private(vijk, vp1, vm1)\n for (k = 1; k < grid_points[2]-1; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * \n\t (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + \n\t u[i][j-1][k][0]) -\n\t ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * \n\t (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + \n\t u[i][j-1][k][1]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][1]*vp1 - \n\t\t u[i][j-1][k][1]*vm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * \n\t (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + \n\t u[i][j-1][k][2]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[i][j+1][k][2]*vp1 - \n\t\t u[i][j-1][k][2]*vm1 +\n\t\t (u[i][j+1][k][4] - square[i][j+1][k] - \n\t\t u[i][j-1][k][4] + square[i][j-1][k])\n\t\t *c2);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * \n\t (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + \n\t u[i][j-1][k][3]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][3]*vp1 - \n\t\t u[i][j-1][k][3]*vm1);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * \n\t (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + \n\t u[i][j-1][k][4]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j-1][k][4]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[i][j+1][k][4] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[i][j-1][k][4] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n }\n } #pragma omp parallel for private(vijk, vp1, vm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1)", "context_chars": 100, "text": "/\n #pragma omp parallel for private(vijk, vp1, vm1)\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(vijk, vp1, vm1)\n for (k = 1; k < grid_points[2]-1; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * \n\t (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + \n\t u[i][j-1][k][0]) -\n\t ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * \n\t (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + \n\t u[i][j-1][k][1]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][1]*vp1 - \n\t\t u[i][j-1][k][1]*vm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * \n\t (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + \n\t u[i][j-1][k][2]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[i][j+1][k][2]*vp1 - \n\t\t u[i][j-1][k][2]*vm1 +\n\t\t (u[i][j+1][k][4] - square[i][j+1][k] - \n\t\t u[i][j-1][k][4] + square[i][j-1][k])\n\t\t *c2);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * \n\t (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + \n\t u[i][j-1][k][3]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][3]*vp1 - \n\t\t u[i][j-1][k][3]*vm1);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * \n\t (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + \n\t u[i][j-1][k][4]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * 
(u[i][j+1][k][4]*rho_i[i][j+1][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j-1][k][4]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[i][j+1][k][4] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[i][j-1][k][4] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n } #pragma omp parallel for private(vijk, vp1, vm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1)", "context_chars": 100, "text": "#pragma omp parallel for private(vijk, vp1, vm1)\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * \n\t (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + \n\t u[i][j-1][k][0]) -\n\t ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * \n\t (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + \n\t u[i][j-1][k][1]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][1]*vp1 - \n\t\t u[i][j-1][k][1]*vm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * \n\t (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + \n\t u[i][j-1][k][2]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[i][j+1][k][2]*vp1 - \n\t\t u[i][j-1][k][2]*vm1 +\n\t\t (u[i][j+1][k][4] - square[i][j+1][k] - \n\t\t u[i][j-1][k][4] + square[i][j-1][k])\n\t\t *c2);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * \n\t (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + \n\t u[i][j-1][k][3]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][3]*vp1 - \n\t\t u[i][j-1][k][3]*vm1);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * \n\t (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + \n\t u[i][j-1][k][4]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j-1][k][4]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[i][j+1][k][4] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[i][j-1][k][4] - \n\t\t c2*square[i][j-1][k]) * vm1);\n } #pragma omp parallel for private(vijk, vp1, vm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "sipation \nc-------------------------------------------------------------------*/\n j = 1;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------*/\n j = 1;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n }\n } #pragma omp parallel for "} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n }\n }\n }\n\n j = 2;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " }\n }\n }\n\n j = 2;\n #pragma omp parallel for\n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[0]-1; i++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n }\n }\n }\n\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 3; j < grid_points[1]-3; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 3; j < grid_points[1]-3; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = 
rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 3; j < grid_points[1]-3; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "points[1]-3; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n }\n }\n }\n \n j = grid_points[1]-3;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n j = grid_points[1]-3;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[0]-1; i++) {\n #pragma omp parallel for\n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n }\n }\n }\n\n j = grid_points[1]-2;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) 
{\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +\n\t 5.*u[i][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n j = grid_points[1]-2;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +\n\t 5.*u[i][j][k][m] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +\n\t 5.*u[i][j][k][m] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1)", "context_chars": 100, "text": "ute zeta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for private(wijk, wp1, wm1) \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(wijk, wp1, wm1) \n for (k = 1; k < grid_points[2]-1; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * \n\t (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + \n\t u[i][j][k-1][0]) -\n\t tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * \n\t (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + \n\t u[i][j][k-1][1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][1]*wp1 - \n\t\t u[i][j][k-1][1]*wm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * \n\t (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + \n\t u[i][j][k-1][2]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][2]*wp1 - \n\t\t u[i][j][k-1][2]*wm1);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * \n\t (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + \n\t u[i][j][k-1][3]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[i][j][k+1][3]*wp1 - \n\t\t u[i][j][k-1][3]*wm1 +\n\t\t (u[i][j][k+1][4] - square[i][j][k+1] - \n\t\t u[i][j][k-1][4] + square[i][j][k-1])\n\t\t *c2);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * \n\t (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + \n\t u[i][j][k-1][4]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j][k-1][4]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[i][j][k+1][4] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[i][j][k-1][4] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n }\n } #pragma omp parallel for private(wijk, wp1, wm1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1) ", "context_chars": 100, "text": "/\n #pragma omp parallel for private(wijk, wp1, wm1)\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(wijk, wp1, wm1) \n for (k = 1; k < grid_points[2]-1; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * \n\t (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + \n\t u[i][j][k-1][0]) -\n\t tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * \n\t (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + \n\t u[i][j][k-1][1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][1]*wp1 - \n\t\t u[i][j][k-1][1]*wm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * \n\t (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + \n\t u[i][j][k-1][2]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][2]*wp1 - \n\t\t u[i][j][k-1][2]*wm1);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * \n\t (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + \n\t u[i][j][k-1][3]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[i][j][k+1][3]*wp1 - \n\t\t u[i][j][k-1][3]*wm1 +\n\t\t (u[i][j][k+1][4] - square[i][j][k+1] - \n\t\t u[i][j][k-1][4] + square[i][j][k-1])\n\t\t *c2);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * \n\t (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + \n\t u[i][j][k-1][4]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j][k-1][4]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[i][j][k+1][4] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[i][j][k-1][4] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n } #pragma omp parallel for private(wijk, wp1, wm1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1) ", "context_chars": 100, "text": "pragma omp parallel for private(wijk, wp1, wm1) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * \n\t (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + \n\t u[i][j][k-1][0]) -\n\t tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * \n\t (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + \n\t u[i][j][k-1][1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][1]*wp1 - \n\t\t u[i][j][k-1][1]*wm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * \n\t (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + \n\t u[i][j][k-1][2]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][2]*wp1 - \n\t\t u[i][j][k-1][2]*wm1);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * \n\t (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + \n\t u[i][j][k-1][3]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[i][j][k+1][3]*wp1 - \n\t\t u[i][j][k-1][3]*wm1 +\n\t\t (u[i][j][k+1][4] - square[i][j][k+1] - \n\t\t u[i][j][k-1][4] + square[i][j][k-1])\n\t\t 
*c2);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * \n\t (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + \n\t u[i][j][k-1][4]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j][k-1][4]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[i][j][k+1][4] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[i][j][k-1][4] - \n\t\t c2*square[i][j][k-1])*wm1);\n } #pragma omp parallel for private(wijk, wp1, wm1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "n \nc-------------------------------------------------------------------*/\n k = 1;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------------*/\n k = 1;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n }\n }\n }\n\n k = 2;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n k = 2;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n }\n }\n }\n\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 3; k < grid_points[2]-3; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 3; k < grid_points[2]-3; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 3; k < grid_points[2]-3; k++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 3; k < grid_points[2]-3; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n }\n }\n }\n \n k = grid_points[2]-3;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; 
j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n k = grid_points[2]-3;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n }\n }\n }\n\n k = grid_points[2]-2;\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n k = grid_points[2]-2;\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n }\n }\n }\n\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; 
m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 1; i < grid_points[0]-1; i++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "] );\n }\n }\n }\n\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 1; i < grid_points[0]-1; i++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "oints[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 1; i < grid_points[0]-1; i++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (i = 1; i < grid_points[0]-1; i++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n\n rhs_norm(xcr);\n\n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n }\n\n *class = 'U';\n *verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "he known reference values.\nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------*/\n\n int i, j, k, m, n;\n\n for (i = grid_points[0]-2; i >= 0; i--) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < 
BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m]\n\t - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];\n\t }\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "2; i >= 0; i--) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m]\n\t - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "points[1]-1; j++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m]\n\t - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];\n\t }\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " - sweeping in i direction\nc-------------------------------------------------------------------*/\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc multiply c(0,j,k) by b_inverse and copy back to c\nc multiply rhs(0) by b_inverse(0) and copy to rhs\nc-------------------------------------------------------------------*/\n binvcrhs( lhs[0][j][k][BB],\n\t\tlhs[0][j][k][CC],\n\t\trhs[0][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------------------------------------------------------------*/\n for (i = 1; i < isize; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc rhs(i) = rhs(i) - A*rhs(i-1)\nc-------------------------------------------------------------------*/\n\tmatvec_sub(lhs[i][j][k][AA],\n\t\t rhs[i-1][j][k], rhs[i][j][k]);\n\n/*--------------------------------------------------------------------\nc B(i) = B(i) - C(i-1)*A(i)\nc-------------------------------------------------------------------*/\n\tmatmul_sub(lhs[i][j][k][AA],\n\t\t lhs[i-1][j][k][CC],\n\t\t lhs[i][j][k][BB]);\n\n\n/*--------------------------------------------------------------------\nc multiply c(i,j,k) by b_inverse and copy back to c\nc multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs\nc-------------------------------------------------------------------*/\n\tbinvcrhs( lhs[i][j][k][BB],\n\t\t lhs[i][j][k][CC],\n\t\t rhs[i][j][k] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, 
"text": "----*/\n\tbinvcrhs( lhs[i][j][k][BB],\n\t\t lhs[i][j][k][CC],\n\t\t rhs[i][j][k] );\n\n }\n }\n }\n\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc rhs(isize) = rhs(isize) - A*rhs(isize-1)\nc-------------------------------------------------------------------*/\n matvec_sub(lhs[isize][j][k][AA],\n\t\t rhs[isize-1][j][k], rhs[isize][j][k]);\n\n/*--------------------------------------------------------------------\nc B(isize) = B(isize) - C(isize-1)*A(isize)\nc-------------------------------------------------------------------*/\n matmul_sub(lhs[isize][j][k][AA],\n\t\t lhs[isize-1][j][k][CC],\n\t\t lhs[isize][j][k][BB]);\n\n/*--------------------------------------------------------------------\nc multiply rhs() by b_inverse() and copy to rhs\nc-------------------------------------------------------------------*/\n binvrhs( lhs[i][j][k][BB],\n\t rhs[i][j][k] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------*/\n\n int i, j, k, m, n;\n \n for (j = grid_points[1]-2; j >= 0; j--) {\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];\n\t }\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-2; j >= 0; j--) {\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "points[0]-1; i++) {\n #pragma omp parallel for \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];\n\t }\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------------------*/\n\n int i, j, k, jsize;\n\n jsize = grid_points[1]-1;\n\n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc multiply c(i,0,k) by b_inverse and copy back to c\nc multiply rhs(0) by b_inverse(0) and copy to rhs\nc-------------------------------------------------------------------*/\n binvcrhs( 
lhs[i][0][k][BB],\n\t\tlhs[i][0][k][CC],\n\t\trhs[i][0][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------------------------------------------------------------*/\n for (j = 1; j < jsize; j++) {\n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc subtract A*lhs_vector(j-1) from lhs_vector(j)\nc \nc rhs(j) = rhs(j) - A*rhs(j-1)\nc-------------------------------------------------------------------*/\n\tmatvec_sub(lhs[i][j][k][AA],\n\t\t rhs[i][j-1][k], rhs[i][j][k]);\n\n/*--------------------------------------------------------------------\nc B(j) = B(j) - C(j-1)*A(j)\nc-------------------------------------------------------------------*/\n\tmatmul_sub(lhs[i][j][k][AA],\n\t\t lhs[i][j-1][k][CC],\n\t\t lhs[i][j][k][BB]);\n\n/*--------------------------------------------------------------------\nc multiply c(i,j,k) by b_inverse and copy back to c\nc multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs\nc-------------------------------------------------------------------*/\n\tbinvcrhs( lhs[i][j][k][BB],\n\t\t lhs[i][j][k][CC],\n\t\t rhs[i][j][k] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----*/\n\tbinvcrhs( lhs[i][j][k][BB],\n\t\t lhs[i][j][k][CC],\n\t\t rhs[i][j][k] );\n\n }\n }\n }\n\n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n/*--------------------------------------------------------------------\nc rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)\nc-------------------------------------------------------------------*/\n matvec_sub(lhs[i][jsize][k][AA],\n\t\t rhs[i][jsize-1][k], rhs[i][jsize][k]);\n\n/*--------------------------------------------------------------------\nc B(jsize) = B(jsize) - C(jsize-1)*A(jsize)\nc call matmul_sub(aa,i,jsize,k,c,\nc $ cc,i,jsize-1,k,c,BB,i,jsize,k)\nc-------------------------------------------------------------------*/\n matmul_sub(lhs[i][jsize][k][AA],\n\t\t lhs[i][jsize-1][k][CC],\n\t\t lhs[i][jsize][k][BB]);\n\n/*--------------------------------------------------------------------\nc multiply rhs(jsize) by b_inverse(jsize) and copy to rhs\nc-------------------------------------------------------------------*/\n binvrhs( lhs[i][jsize][k][BB],\n\t rhs[i][jsize][k] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ell\nc-------------------------------------------------------------------*/\n\n int i, j, k, m, n;\n\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = grid_points[2]-2; k >= 0; k--) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];\n\t }\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/\n\n int i, j, k, m, n;\n\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = grid_points[2]-2; k >= 0; k--) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];\n\t }\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " - sweeping in i direction\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n\n/*--------------------------------------------------------------------\nc multiply c(i,j,0) by b_inverse and copy back to c\nc multiply rhs(0) by b_inverse(0) and copy to rhs\nc-------------------------------------------------------------------*/\n binvcrhs( lhs[i][j][0][BB],\n\t\tlhs[i][j][0][CC],\n\t\trhs[i][j][0] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------------------------------------------*/\n for (k = 1; k < ksize; k++) {\n for (i = 1; i < grid_points[0]-1; i++) {\n\t for (j = 1; j < grid_points[1]-1; j++) {\n\n/*--------------------------------------------------------------------\nc subtract A*lhs_vector(k-1) from lhs_vector(k)\nc \nc rhs(k) = rhs(k) - A*rhs(k-1)\nc-------------------------------------------------------------------*/\n\tmatvec_sub(lhs[i][j][k][AA],\n\t\t rhs[i][j][k-1], rhs[i][j][k]);\n\n/*--------------------------------------------------------------------\nc B(k) = B(k) - C(k-1)*A(k)\nc call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)\nc-------------------------------------------------------------------*/\n\tmatmul_sub(lhs[i][j][k][AA],\n\t\t lhs[i][j][k-1][CC],\n\t\t lhs[i][j][k][BB]);\n\n/*--------------------------------------------------------------------\nc multiply c(i,j,k) by b_inverse and copy back to c\nc multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs\nc-------------------------------------------------------------------*/\n\tbinvcrhs( lhs[i][j][k][BB],\n\t\t lhs[i][j][k][CC],\n\t\t rhs[i][j][k] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "pecial cases for last cell\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n\n/*--------------------------------------------------------------------\nc rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)\nc-------------------------------------------------------------------*/\n matvec_sub(lhs[i][j][ksize][AA],\n\t\t rhs[i][j][ksize-1], rhs[i][j][ksize]);\n\n/*--------------------------------------------------------------------\nc B(ksize) = 
B(ksize) - C(ksize-1)*A(ksize)\nc call matmul_sub(aa,i,j,ksize,c,\nc $ cc,i,j,ksize-1,c,BB,i,j,ksize)\nc-------------------------------------------------------------------*/\n matmul_sub(lhs[i][j][ksize][AA],\n\t\t lhs[i][j][ksize-1][CC],\n\t\t lhs[i][j][ksize][BB]);\n\n/*--------------------------------------------------------------------\nc multiply rhs(ksize) by b_inverse(ksize) and copy to rhs\nc-------------------------------------------------------------------*/\n binvrhs( lhs[i][j][ksize][BB],\n\t rhs[i][j][ksize] );\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lastcol-firstcol+1)\nc---------------------------------------------------------------------*/\n{\t\n for (j = 1; j <= lastrow - firstrow + 1; j++) {\n\t#pragma omp parallel for \n\tfor (k = rowstr[j]; k < rowstr[j+1]; k++) {\n colidx[k] = colidx[k] - firstcol + 1;\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------*/\n{\t\n #pragma omp parallel for\n for (j = 1; j <= lastrow - firstrow + 1; j++) {\n\tfor (k = rowstr[j]; k < rowstr[j+1]; k++) {\n colidx[k] = colidx[k] - firstcol + 1;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "vector to (1, 1, .... 
1)\nc-------------------------------------------------------------------*/\n for (i = 1; i <= NA+1; i++) {\n\tx[i] = 1.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------*/\n #pragma omp parallel for \n for (i = 1; i <= NA+1; i++) {\n\tx[i] = 1.0;\n }\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n q[j] = 0.0;\n z[j] = 0.0;\n r[j] = 0.0;\n p[j] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:norm_temp11) reduction(+:norm_temp12)", "context_chars": 100, "text": "-------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t} #pragma omp parallel for reduction(+:norm_temp11) reduction(+:norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "lize the CG algorithm:\nc-------------------------------------------------------------------*/\n{\n for (j = 1; j <= naa+1; j++) {\n\tq[j] = 0.0;\n\tz[j] = 0.0;\n\tr[j] = x[j];\n\tp[j] = r[j];\n\t//w[j] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rho) ", "context_chars": 100, "text": "of r elements locally...\nc-------------------------------------------------------------------*/\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n\trho = rho + r[j]*r[j];\n } #pragma omp parallel for reduction(+:rho) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(sum) ", "context_chars": 100, "text": "C on the Cray t3d - overall speed of code is 1.5 times faster.\n*/\n\n/* rolled version */ \n\tfor (j = 1; j <= lastrow-firstrow+1; j++) {\n sum = 0.0;\n\t for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n\t\tsum = sum + a[k]*p[colidx[k]];\n\t }\n //w[j] = sum;\n q[j] = sum;\n\t} #pragma omp parallel for private(sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:d) ", "context_chars": 100, "text": "-------------\nc Obtain p.q\nc-------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n d = d + p[j]*q[j];\n\t} #pragma omp parallel for reduction(+:d) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:rho) ", "context_chars": 100, "text": "c and r = r - alpha*q\nc---------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n z[j] = z[j] + 
alpha*p[j];\n r[j] = r[j] - alpha*q[j];\n//\t} #pragma omp parallel for reduction(+:rho) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------\nc p = r + beta*p\nc-------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n p[j] = r[j] + beta*p[j];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(d) ", "context_chars": 100, "text": "\nc---------------------------------------------------------------------*/\n sum = 0.0;\n \n{\n for (j = 1; j <= lastrow-firstrow+1; j++) {\n\td = 0.0;\n\tfor (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {\n d = d + a[k]*z[colidx[k]];\n\t}\n\tr[j] = d;\n } #pragma omp parallel for private(d) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(d) reduction(+:sum) ", "context_chars": 100, "text": "is point, r contains A.z\nc-------------------------------------------------------------------*/\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n\td = x[j] - r[j];\n\tsum = sum + d*d;\n } #pragma omp parallel for private(d) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "mark nonzero positions\nc---------------------------------------------------------------------*/\n for (i = 1; i <= n; i++) {\n\tcolidx[n+i] = 0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "r of triples in each row\nc-------------------------------------------------------------------*/\n for (j = 1; j <= n; j++) {\n\trowstr[j] = 0;\n\tmark[j] = FALSE;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ". preload data pages\nc---------------------------------------------------------------------*/\n for(j = 0;j <= nrows-1;j++) {\n #pragma omp parallel for firstprivate(j) \n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j) ", "context_chars": 100, "text": "-------------------*/\n #pragma omp parallel for \n for(j = 0;j <= nrows-1;j++) {\n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n }\n/*--------------------------------------------------------------------\nc ... 
do a bucket sort of the triples on the row index\nc-------------------------------------------------------------------*/\n for (nza = 1; nza <= nnza; nza++) {\n\tj = arow[nza] - firstrow + 1;\n\tk = rowstr[j];\n\ta[k] = aelt[nza];\n\tcolidx[k] = acol[nza];\n\trowstr[j] = rowstr[j] + 1;\n } #pragma omp parallel for firstprivate(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ng elements\nc-------------------------------------------------------------------*/\n nza = 0;\n for (i = 1; i <= n; i++) {\n\tx[i] = 0.0;\n\tmark[i] = FALSE;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------------------*/\n int i, j, m;\n double tmp, tmp1;\n double tmat[5][5];\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tv[i][j][k][m] = v[i][j][k][m]\n\t - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]\n\t\t + ldz[i][j][m][1] * v[i][j][k-1][1]\n\t\t + ldz[i][j][m][2] * v[i][j][k-1][2]\n\t\t + ldz[i][j][m][3] * v[i][j][k-1][3]\n\t\t + ldz[i][j][m][4] * v[i][j][k-1][4] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " tmp, tmp1;\n double tmat[5][5];\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tv[i][j][k][m] = v[i][j][k][m]\n\t - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]\n\t\t + ldz[i][j][m][1] * v[i][j][k-1][1]\n\t\t + ldz[i][j][m][2] * v[i][j][k-1][2]\n\t\t + ldz[i][j][m][3] * v[i][j][k-1][3]\n\t\t + ldz[i][j][m][4] * v[i][j][k-1][4] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "i = ist; i <= iend; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n for (m = 0; m < 5; m++) {\n\tv[i][j][k][m] = v[i][j][k][m]\n\t - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]\n\t\t + ldz[i][j][m][1] * v[i][j][k-1][1]\n\t\t + ldz[i][j][m][2] * v[i][j][k-1][2]\n\t\t + ldz[i][j][m][3] * v[i][j][k-1][3]\n\t\t + ldz[i][j][m][4] * v[i][j][k-1][4] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------------------*/\n int i, j, m;\n double tmp, tmp1;\n double tmat[5][5];\n\n for (i = iend; i >= ist; i--) {\n #pragma omp parallel for \n for (j = jend; j >= jst; j--) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\ttv[i][j][m] = \n\t omega * ( udz[i][j][m][0] * v[i][j][k+1][0]\n\t\t + udz[i][j][m][1] * v[i][j][k+1][1]\n\t\t + udz[i][j][m][2] * v[i][j][k+1][2]\n\t\t + udz[i][j][m][3] * v[i][j][k+1][3]\n\t\t + 
udz[i][j][m][4] * v[i][j][k+1][4] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " tmp, tmp1;\n double tmat[5][5];\n\n #pragma omp parallel for \n for (i = iend; i >= ist; i--) {\n for (j = jend; j >= jst; j--) {\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\ttv[i][j][m] = \n\t omega * ( udz[i][j][m][0] * v[i][j][k+1][0]\n\t\t + udz[i][j][m][1] * v[i][j][k+1][1]\n\t\t + udz[i][j][m][2] * v[i][j][k+1][2]\n\t\t + udz[i][j][m][3] * v[i][j][k+1][3]\n\t\t + udz[i][j][m][4] * v[i][j][k+1][4] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "i = iend; i >= ist; i--) {\n #pragma omp parallel for \n for (j = jend; j >= jst; j--) {\n for (m = 0; m < 5; m++) {\n\ttv[i][j][m] = \n\t omega * ( udz[i][j][m][0] * v[i][j][k+1][0]\n\t\t + udz[i][j][m][1] * v[i][j][k+1][1]\n\t\t + udz[i][j][m][2] * v[i][j][k+1][2]\n\t\t + udz[i][j][m][3] * v[i][j][k+1][3]\n\t\t + udz[i][j][m][4] * v[i][j][k+1][4] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "uble u21jm1, u31jm1, u41jm1, u51jm1;\n double u21km1, u31km1, u41km1, u51km1;\n\n dsspm = dssp;\n\n for (i = 0; i < nx; i++) {\n #pragma omp parallel for \n for (j = 0; j < ny; j++) {\n #pragma omp parallel for \n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "km1, u41km1, u51km1;\n\n dsspm = dssp;\n\n #pragma omp parallel for \n for (i = 0; i < nx; i++) {\n for (j = 0; j < ny; j++) {\n #pragma omp parallel for \n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or \n for (i = 0; i < nx; i++) {\n #pragma omp parallel for \n for (j = 0; j < ny; j++) {\n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \n for (j = 0; j < ny; j++) {\n #pragma omp parallel for \n for (k = 0; k < nz; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", 
"omp_pragma_line": "#pragma omp parallel for private(iglob, xi)", "context_chars": 100, "text": "gma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n }\n }\n\n for (i = 0; i < nx; i++) {\n iglob = i;\n xi = ( (double)(iglob) ) / ( nx0 - 1 );\n #pragma omp parallel for private(jglob, eta) \n for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n #pragma omp parallel for private(zeta) \n for (k = 0; k < nz; k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n }\n }\n } #pragma omp parallel for private(iglob, xi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(jglob, eta) ", "context_chars": 100, "text": "ob, xi)\n for (i = 0; i < nx; i++) {\n iglob = i;\n xi = ( (double)(iglob) ) / ( nx0 - 1 );\n for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n #pragma omp parallel for private(zeta) \n for (k = 0; k < nz; k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n }\n } #pragma omp parallel for private(jglob, eta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(zeta) ", "context_chars": 100, "text": " for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n for (k = 0; k < nz; k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n } #pragma omp parallel for private(zeta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = nx-1;\n\n for (i = L1; i <= L2; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(u21, q) \n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + 
rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------*/\n\n L1 = 0;\n L2 = nx-1;\n\n #pragma omp parallel for \n for (i = L1; i <= L2; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(u21, q) \n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u21, q) ", "context_chars": 100, "text": "r (i = L1; i <= L2; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n } #pragma omp parallel for private(u21, q) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)", "context_chars": 100, "text": "][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n }\n }\n\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) firstprivate(ist)\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp 
* rsd[i-1][j][k][1];\n\tu31im1 = tmp * rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[1][j][k][m] = frct[1][j][k][m]\n\t - dsspm * ( + 5.0 * rsd[1][j][k][m]\n\t\t - 4.0 * rsd[2][j][k][m]\n\t\t + rsd[3][j][k][m] );\n\tfrct[2][j][k][m] = frct[2][j][k][m]\n\t - dsspm * ( - 4.0 * rsd[1][j][k][m]\n\t\t + 6.0 * rsd[2][j][k][m]\n\t\t - 4.0 * rsd[3][j][k][m]\n\t\t + rsd[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n for (i = ist1; i <=iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i-2][j][k][m]\n\t\t\t\t - 4.0 * rsd[i-1][j][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i+1][j][k][m]\n\t\t\t\t + rsd[i+2][j][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[nx-3][j][k][m] = frct[nx-3][j][k][m]\n\t - dsspm * ( rsd[nx-5][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-4][j][k][m]\n\t\t\t\t + 6.0 * rsd[nx-3][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-2][j][k][m] );\n\tfrct[nx-2][j][k][m] = frct[nx-2][j][k][m]\n\t - dsspm * ( rsd[nx-4][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-3][j][k][m]\n\t\t\t\t + 5.0 * rsd[nx-2][j][k][m] );\n }\n }\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)", "context_chars": 100, "text": "m1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)\n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) 
firstprivate(jst, ist)\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) firstprivate(ist)\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp * rsd[i-1][j][k][1];\n\tu31im1 = tmp * rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[1][j][k][m] = frct[1][j][k][m]\n\t - dsspm * ( + 5.0 * rsd[1][j][k][m]\n\t\t - 4.0 * rsd[2][j][k][m]\n\t\t + rsd[3][j][k][m] );\n\tfrct[2][j][k][m] = frct[2][j][k][m]\n\t - dsspm * ( - 4.0 * rsd[1][j][k][m]\n\t\t + 6.0 * rsd[2][j][k][m]\n\t\t - 4.0 * rsd[3][j][k][m]\n\t\t + rsd[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n for (i = ist1; i <=iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i-2][j][k][m]\n\t\t\t\t - 4.0 * rsd[i-1][j][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i+1][j][k][m]\n\t\t\t\t + rsd[i+2][j][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[nx-3][j][k][m] = frct[nx-3][j][k][m]\n\t - dsspm * ( rsd[nx-5][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-4][j][k][m]\n\t\t\t\t + 6.0 * rsd[nx-3][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-2][j][k][m] );\n\tfrct[nx-2][j][k][m] = frct[nx-2][j][k][m]\n\t - dsspm * ( rsd[nx-4][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-3][j][k][m]\n\t\t\t\t + 5.0 * rsd[nx-2][j][k][m] );\n }\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)"} 
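A minimal, self-contained sketch of the two OpenMP idioms that recur in the LU records collected here: a parallel for whose per-iteration scalars are listed in a private() clause, and a parallel for that accumulates a scalar through a reduction clause. It is illustrative only, under assumed names and sizes (N, u, flux, u21, q, sum are invented for the sketch and are not identifiers taken from lu.c).

/* Illustrative sketch (assumed example, not an entry from the dataset):
 * idiom 1 mirrors private(u21, q)-style clauses, idiom 2 mirrors the
 * reduction(+:sum0) ... reduction(+:sum4) norm loops seen above. */
#include <math.h>
#include <stdio.h>

#define N 64

int main(void)
{
    static double u[N][5], flux[N][5];
    double u21, q, sum = 0.0;
    int i, m;

    /* fill u with arbitrary positive data so the divisions are safe */
    for (i = 0; i < N; i++)
        for (m = 0; m < 5; m++)
            u[i][m] = 1.0 + 0.001 * (i + m);

    /* idiom 1: scalars written on every iteration are declared private
     * so each thread works on its own copy */
#pragma omp parallel for private(m, u21, q) schedule(static)
    for (i = 0; i < N; i++) {
        u21 = u[i][1] / u[i][0];
        q = 0.50 * (u[i][1] * u[i][1]
                  + u[i][2] * u[i][2]
                  + u[i][3] * u[i][3]) / u[i][0];
        flux[i][0] = u[i][1];
        for (m = 1; m < 4; m++)
            flux[i][m] = u[i][m] * u21;
        flux[i][4] = (u[i][4] - q) * u21;
    }

    /* idiom 2: a scalar accumulation is made thread-safe with a
     * reduction clause instead of a critical section */
#pragma omp parallel for reduction(+ : sum) schedule(static)
    for (i = 0; i < N; i++)
        sum += flux[i][4] * flux[i][4];

    printf("norm = %f\n", sqrt(sum / N));
    return 0;
}

Compiled with something like cc -fopenmp sketch.c -lm it prints a single norm; without -fopenmp the pragmas are ignored and the result is unchanged.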
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)", "context_chars": 100, "text": "u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)\n for (k = 1; k <= nz - 2; k++) {\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)\n for (i = ist; i <= iend; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) firstprivate(ist)", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp * rsd[i-1][j][k][1];\n\tu31im1 = tmp * rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) firstprivate(ist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ".0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( 
flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q)", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = ny-1;\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for private(u31, q)\n //firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for private(u31, q)\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n }\n } #pragma omp parallel for private(u31, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q)", "context_chars": 100, "text": "= 0;\n L2 = ny-1;\n\n #pragma omp parallel for private(u31, q)\n for (i = ist; i <= iend; i++) {\n //firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for private(u31, q)\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n } #pragma omp parallel for private(u31, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q)", "context_chars": 100, "text": "q)\n //firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n } #pragma omp parallel for private(u31, q)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)", "context_chars": 100, "text": "][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n }\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j - u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][1][k][m] = frct[i][1][k][m]\n\t - dsspm * ( + 5.0 * rsd[i][1][k][m]\n\t\t - 4.0 * rsd[i][2][k][m]\n\t\t + rsd[i][3][k][m] );\n\tfrct[i][2][k][m] = frct[i][2][k][m]\n\t - dsspm * ( - 4.0 * rsd[i][1][k][m]\n\t\t + 6.0 * rsd[i][2][k][m]\n\t\t - 4.0 * rsd[i][3][k][m]\n\t\t + rsd[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j-2][k][m]\n\t\t\t\t - 4.0 * rsd[i][j-1][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j+1][k][m]\n\t\t\t\t + rsd[i][j+2][k][m] );\n\t}\n }\n\n for (m 
= 0; m < 5; m++) {\n\tfrct[i][ny-3][k][m] = frct[i][ny-3][k][m]\n\t - dsspm * ( rsd[i][ny-5][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-4][k][m]\n\t\t\t\t + 6.0 * rsd[i][ny-3][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-2][k][m] );\n\tfrct[i][ny-2][k][m] = frct[i][ny-2][k][m]\n\t - dsspm * ( rsd[i][ny-4][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-3][k][m]\n\t\t\t\t + 5.0 * rsd[i][ny-2][k][m] );\n }\n\n }\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)", "context_chars": 100, "text": "(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)\n for (i = ist; i <= iend; i++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j - u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][1][k][m] = frct[i][1][k][m]\n\t - dsspm * ( + 5.0 * rsd[i][1][k][m]\n\t\t - 4.0 * rsd[i][2][k][m]\n\t\t + rsd[i][3][k][m] );\n\tfrct[i][2][k][m] = frct[i][2][k][m]\n\t - dsspm * ( - 4.0 * rsd[i][1][k][m]\n\t\t + 6.0 * rsd[i][2][k][m]\n\t\t - 4.0 * 
rsd[i][3][k][m]\n\t\t + rsd[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j-2][k][m]\n\t\t\t\t - 4.0 * rsd[i][j-1][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j+1][k][m]\n\t\t\t\t + rsd[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[i][ny-3][k][m] = frct[i][ny-3][k][m]\n\t - dsspm * ( rsd[i][ny-5][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-4][k][m]\n\t\t\t\t + 6.0 * rsd[i][ny-3][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-2][k][m] );\n\tfrct[i][ny-2][k][m] = frct[i][ny-2][k][m]\n\t - dsspm * ( rsd[i][ny-4][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-3][k][m]\n\t\t\t\t + 5.0 * rsd[i][ny-2][k][m] );\n }\n\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ", u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)\n for (k = 1; k <= nz - 2; k++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j - u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ".0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = 
frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u41, q)", "context_chars": 100, "text": "direction flux differences\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(u41, q)\n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][3];\n\tu41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u41;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u41;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n }\n\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1)\n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21k = tmp * rsd[i][j][k][1];\n\tu31k = tmp * rsd[i][j][k][2];\n\tu41k = tmp * rsd[i][j][k][3];\n\tu51k = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j][k-1][0];\n\n\tu21km1 = tmp * rsd[i][j][k-1][1];\n\tu31km1 = tmp * rsd[i][j][k-1][2];\n\tu41km1 = tmp * rsd[i][j][k-1][3];\n\tu51km1 = tmp * rsd[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k \n\t\t\t\t\t - u41km1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )\n\t\t - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )\n\t + (1.0/6.0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n #pragma omp parallel for\n for (k = 1; k <= nz - 2; k++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dz1 * tz1 * ( rsd[i][j][k+1][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j][k-1][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( rsd[i][j][k+1][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j][k-1][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tz3 
* C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( rsd[i][j][k+1][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j][k-1][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( rsd[i][j][k+1][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j][k-1][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( rsd[i][j][k+1][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j][k-1][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][j][1][m] = frct[i][j][1][m]\n\t - dsspm * ( + 5.0 * rsd[i][j][1][m]\n\t\t - 4.0 * rsd[i][j][2][m]\n\t\t + rsd[i][j][3][m] );\n\tfrct[i][j][2][m] = frct[i][j][2][m]\n\t - dsspm * (- 4.0 * rsd[i][j][1][m]\n\t\t + 6.0 * rsd[i][j][2][m]\n\t\t - 4.0 * rsd[i][j][3][m]\n\t\t + rsd[i][j][4][m] );\n }\n\n #pragma omp parallel \n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j][k-2][m]\n\t\t\t\t - 4.0 * rsd[i][j][k-1][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j][k+1][m]\n\t\t\t\t + rsd[i][j][k+2][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[i][j][nz-3][m] = frct[i][j][nz-3][m]\n\t - dsspm * ( rsd[i][j][nz-5][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-4][m]\n\t\t\t\t + 6.0 * rsd[i][j][nz-3][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-2][m] );\n frct[i][j][nz-2][m] = frct[i][j][nz-2][m]\n\t - dsspm * ( rsd[i][j][nz-4][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-3][m]\n\t\t\t\t + 5.0 * rsd[i][j][nz-2][m] );\n }\n }\n } #pragma omp parallel for private(u41, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u41, q)", "context_chars": 100, "text": "lel for private(u41, q)\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][3];\n\tu41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u41;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u41;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n } #pragma omp parallel for private(u41, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " - C2 * q ) * u41;\n }\n\n 
#pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1)", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21k = tmp * rsd[i][j][k][1];\n\tu31k = tmp * rsd[i][j][k][2];\n\tu41k = tmp * rsd[i][j][k][3];\n\tu51k = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j][k-1][0];\n\n\tu21km1 = tmp * rsd[i][j][k-1][1];\n\tu31km1 = tmp * rsd[i][j][k-1][2];\n\tu41km1 = tmp * rsd[i][j][k-1][3];\n\tu51km1 = tmp * rsd[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k \n\t\t\t\t\t - u41km1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )\n\t\t - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )\n\t + (1.0/6.0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n } #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dz1 * tz1 * ( rsd[i][j][k+1][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j][k-1][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( rsd[i][j][k+1][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j][k-1][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( rsd[i][j][k+1][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j][k-1][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( rsd[i][j][k+1][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j][k-1][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( rsd[i][j][k+1][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j][k-1][4] );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-------------------*/\n int i, j, k, m;\n int iglob, jglob;\n double tmp;\n double u000ijk[5];\n\n for (m = 0; m < 5; m++) {\n errnm[m] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp)", "context_chars": 100, "text": "d; j++) {\n jglob = j;\n for (k = 1; 
k <= nz-2; k++) {\n\texact( iglob, jglob, k, u000ijk );\n\tfor (m = 0; m < 5; m++) {\n\t tmp = ( u000ijk[m] - u[i][j][k][m] );\n\t errnm[m] = errnm[m] + tmp *tmp;\n\t} #pragma omp parallel for private(tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t tmp = ( u000ijk[m] - u[i][j][k][m] );\n\t errnm[m] = errnm[m] + tmp *tmp;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i = ((double)i) / (nx0 - 1);\n eta = ((double)j) / (ny0 - 1);\n zeta = ((double)k) / (nz - 1);\n\n for (m = 0; m < 5; m++) {\n u000ijk[m] = ce[m][0]\n + ce[m][1] * xi\n + ce[m][2] * eta\n + ce[m][3] * zeta\n + ce[m][4] * xi * xi\n + ce[m][5] * eta * eta\n + ce[m][6] * zeta * zeta\n + ce[m][7] * xi * xi * xi\n + ce[m][8] * eta * eta * eta\n + ce[m][9] * zeta * zeta * zeta\n + ce[m][10] * xi * xi * xi * xi\n + ce[m][11] * eta * eta * eta * eta\n + ce[m][12] * zeta * zeta * zeta * zeta;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3) ", "context_chars": 100, "text": " c34 = C3 * C4;\n\n #pragma omp for private(tmp1, tmp2, tmp3)\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n d[i][j][0][0] = 1.0\n\t+ dt * 2.0 * ( tx1 * dx1\n\t\t\t + ty1 * dy1\n\t\t\t + tz1 * dz1 );\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n\n d[i][j][1][0] = dt * 2.0\n\t* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );\n d[i][j][1][1] = 1.0\n\t+ dt * 2.0 \n\t* ( tx1 * r43 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx2\n\t\t\t + ty1 * dy2\n\t\t\t + tz1 * dz2 );\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n\n d[i][j][2][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )\n\t + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * r43 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx3\n\t\t\t+ ty1 * dy3\n\t\t\t+ tz1 * dz3 );\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n\n d[i][j][3][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * r43 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx4\n\t\t\t+ ty1 * dy4\n\t\t\t+ tz1 * dz4 );\n d[i][j][3][4] = 0.0;\n\n d[i][j][4][0] = dt * 2.0\n\t* ( tx1 * ( - ( r43*c34 
- c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] ) );\n d[i][j][4][1] = dt * 2.0\n\t* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );\n d[i][j][4][2] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]\n\t + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );\n d[i][j][4][3] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );\n d[i][j][4][4] = 1.0\n\t+ dt * 2.0 * ( tx1 * c1345 * tmp1\n\t\t + ty1 * c1345 * tmp1\n\t\t + tz1 * c1345 * tmp1 )\n + dt * 2.0 * ( tx1 * dx5\n\t\t\t+ ty1 * dy5\n\t\t\t+ tz1 * dz5 );\n\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k-1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n a[i][j][0][0] = - dt * tz1 * dz1;\n a[i][j][0][1] = 0.0;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = - dt * tz2;\n a[i][j][0][4] = 0.0;\n\n a[i][j][1][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );\n a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * c34 * tmp1\n\t- dt * tz1 * dz2 ;\n a[i][j][1][2] = 0.0;\n a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );\n a[i][j][1][4] = 0.0;\n\n a[i][j][2][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );\n a[i][j][2][1] = 0.0;\n a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * ( c34 * tmp1 )\n\t- dt * tz1 * dz3;\n a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );\n a[i][j][2][4] = 0.0;\n\n a[i][j][3][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )\n\t + 0.50 * C2\n\t * ( ( u[i][j][k-1][1] * u[i][j][k-1][1]\n\t\t + u[i][j][k-1][2] * u[i][j][k-1][2]\n\t\t + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );\n a[i][j][3][1] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );\n a[i][j][3][2] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );\n a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )\n\t* ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * ( r43 * c34 * tmp1 )\n\t- dt * tz1 * dz4;\n a[i][j][3][4] = - dt * tz2 * C2;\n\n a[i][j][4][0] = - dt * tz2\n\t* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]\n + u[i][j][k-1][2] * u[i][j][k-1][2]\n + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2\n\t - C1 * ( u[i][j][k-1][4] * tmp1 ) )\n\t * ( u[i][j][k-1][3] * tmp1 ) )\n\t- dt * tz1\n\t* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])\n\t - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])\n\t - ( r43*c34 - c1345 )* tmp3 * 
(u[i][j][k-1][3]*u[i][j][k-1][3])\n\t - c1345 * tmp2 * u[i][j][k-1][4] );\n a[i][j][4][1] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];\n a[i][j][4][2] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];\n a[i][j][4][3] = - dt * tz2\n\t* ( C1 * ( u[i][j][k-1][4] * tmp1 )\n - 0.50 * C2\n * ( ( u[i][j][k-1][1]*u[i][j][k-1][1]\n\t\t + u[i][j][k-1][2]*u[i][j][k-1][2]\n\t\t + 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];\n a[i][j][4][4] = - dt * tz2\n\t* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )\n\t- dt * tz1 * c1345 * tmp1\n\t- dt * tz1 * dz5;\n\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j-1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n b[i][j][0][0] = - dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = - dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n\n b[i][j][1][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );\n b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy2;\n b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n\n b[i][j][2][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )\n\t + 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]\n\t\t\t + u[i][j-1][k][2] * u[i][j-1][k][2]\n\t\t\t + u[i][j-1][k][3] * u[i][j-1][k][3] )\n\t\t\t * tmp2 ) )\n\t- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );\n b[i][j][2][1] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );\n b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )\n\t\t\t\t * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1 * ( r43 * c34 * tmp1 )\n\t- dt * ty1 * dy3;\n b[i][j][2][3] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );\n b[i][j][2][4] = - dt * ty2 * C2;\n\n b[i][j][3][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );\n b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n\n b[i][j][4][0] = - dt * ty2\n\t* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]\n\t\t + u[i][j-1][k][2] * u[i][j-1][k][2]\n\t\t + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2\n\t - C1 * ( u[i][j-1][k][4] * tmp1 ) )\n\t * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1\n\t* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))\n\t - ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))\n\t - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))\n\t - c1345*tmp2*u[i][j-1][k][4] );\n b[i][j][4][1] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )\n\t- dt * ty1\n\t* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];\n b[i][j][4][2] = - dt * ty2\n\t* ( C1 * ( u[i][j-1][k][4] * tmp1 )\n\t - 0.50 * C2 \n\t * ( ( u[i][j-1][k][1]*u[i][j-1][k][1]\n + 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]\n\t\t + u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )\n\t- dt * ty1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];\n b[i][j][4][3] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];\n 
b[i][j][4][4] = - dt * ty2\n\t* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1 * c1345 * tmp1\n\t- dt * ty1 * dy5;\n\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i-1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n c[i][j][0][0] = - dt * tx1 * dx1;\n c[i][j][0][1] = - dt * tx2;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = 0.0;\n c[i][j][0][4] = 0.0;\n\n c[i][j][1][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )\n\t + C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]\n + u[i-1][j][k][2] * u[i-1][j][k][2]\n + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );\n c[i][j][1][1] = - dt * tx2\n\t* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * ( r43 * c34 * tmp1 )\n\t- dt * tx1 * dx2;\n c[i][j][1][2] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );\n c[i][j][1][3] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );\n c[i][j][1][4] = - dt * tx2 * C2;\n\n c[i][j][2][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );\n c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );\n c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx3;\n c[i][j][2][3] = 0.0;\n c[i][j][2][4] = 0.0;\n\n c[i][j][3][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );\n c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );\n c[i][j][3][2] = 0.0;\n c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx4;\n c[i][j][3][4] = 0.0;\n\n c[i][j][4][0] = - dt * tx2\n\t* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]\n\t\t + u[i-1][j][k][2] * u[i-1][j][k][2]\n\t\t + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2\n\t - C1 * ( u[i-1][j][k][4] * tmp1 ) )\n\t * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1\n\t* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )\n\t - c1345 * tmp2 * u[i-1][j][k][4] );\n c[i][j][4][1] = - dt * tx2\n\t* ( C1 * ( u[i-1][j][k][4] * tmp1 )\n\t - 0.50 * C2\n\t * ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]\n\t\t + u[i-1][j][k][2]*u[i-1][j][k][2]\n\t\t + u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )\n\t- dt * tx1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];\n c[i][j][4][2] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];\n c[i][j][4][3] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];\n c[i][j][4][4] = - dt * tx2\n\t* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * c1345 * tmp1\n\t- dt * tx1 * dx5;\n } #pragma omp parallel for private(tmp1, tmp2, tmp3) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3)", "context_chars": 100, "text": "NMP) \n #pragma omp parallel for private(tmp1, tmp2, tmp3)\n for (i = iend; i >= ist; i--) {\n for (j = jend; j >= jst; j--) {\n/*#else\t \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) 
{\n*/\t\n\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n d[i][j][0][0] = 1.0\n\t+ dt * 2.0 * ( tx1 * dx1\n\t\t\t + ty1 * dy1\n\t\t\t + tz1 * dz1 );\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n\n d[i][j][1][0] = dt * 2.0\n\t* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );\n d[i][j][1][1] = 1.0\n\t+ dt * 2.0 \n\t* ( tx1 * r43 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx2\n\t\t\t + ty1 * dy2\n\t\t\t + tz1 * dz2 );\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n\n d[i][j][2][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )\n\t + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * r43 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx3\n\t\t\t+ ty1 * dy3\n\t\t\t+ tz1 * dz3 );\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n\n d[i][j][3][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * r43 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx4\n\t\t\t+ ty1 * dy4\n\t\t\t+ tz1 * dz4 );\n d[i][j][3][4] = 0.0;\n\n d[i][j][4][0] = dt * 2.0\n\t* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] ) );\n d[i][j][4][1] = dt * 2.0\n\t* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );\n d[i][j][4][2] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]\n\t + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );\n d[i][j][4][3] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );\n d[i][j][4][4] = 1.0\n + dt * 2.0 * ( tx1 * c1345 * tmp1\n\t\t + ty1 * c1345 * tmp1\n\t\t + tz1 * c1345 * tmp1 )\n + dt * 2.0 * ( tx1 * dx5\n\t\t\t+ ty1 * dy5\n\t\t\t+ tz1 * dz5 );\n\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i+1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n a[i][j][0][0] = - dt * tx1 * dx1;\n a[i][j][0][1] = dt * tx2;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = 0.0;\n 
a[i][j][0][4] = 0.0;\n\n a[i][j][1][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )\n\t + C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]\n + u[i+1][j][k][2] * u[i+1][j][k][2]\n + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );\n a[i][j][1][1] = dt * tx2\n\t* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * ( r43 * c34 * tmp1 )\n\t- dt * tx1 * dx2;\n a[i][j][1][2] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );\n a[i][j][1][3] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );\n a[i][j][1][4] = dt * tx2 * C2 ;\n\n a[i][j][2][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );\n a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );\n a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx3;\n a[i][j][2][3] = 0.0;\n a[i][j][2][4] = 0.0;\n\n a[i][j][3][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );\n a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );\n a[i][j][3][2] = 0.0;\n a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx4;\n a[i][j][3][4] = 0.0;\n\n a[i][j][4][0] = dt * tx2\n\t* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]\n\t\t + u[i+1][j][k][2] * u[i+1][j][k][2]\n\t\t + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2\n\t - C1 * ( u[i+1][j][k][4] * tmp1 ) )\n\t * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1\n\t* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )\n\t - c1345 * tmp2 * u[i+1][j][k][4] );\n a[i][j][4][1] = dt * tx2\n\t* ( C1 * ( u[i+1][j][k][4] * tmp1 )\n\t - 0.50 * C2\n\t * ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]\n\t\t + u[i+1][j][k][2]*u[i+1][j][k][2]\n\t\t + u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )\n\t- dt * tx1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];\n a[i][j][4][2] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];\n a[i][j][4][3] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];\n a[i][j][4][4] = dt * tx2\n\t* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * c1345 * tmp1\n\t- dt * tx1 * dx5;\n\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j+1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n b[i][j][0][0] = - dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n\n b[i][j][1][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );\n b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy2;\n b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n\n b[i][j][2][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )\n\t + 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]\n\t\t\t + u[i][j+1][k][2] * u[i][j+1][k][2]\n\t\t\t + u[i][j+1][k][3] * u[i][j+1][k][3] )\n\t\t\t * tmp2 ) )\n\t- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );\n 
b[i][j][2][1] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );\n b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )\n\t\t\t\t * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1 * ( r43 * c34 * tmp1 )\n\t- dt * ty1 * dy3;\n b[i][j][2][3] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );\n b[i][j][2][4] = dt * ty2 * C2;\n\n b[i][j][3][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );\n b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n\n b[i][j][4][0] = dt * ty2\n\t* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]\n\t\t + u[i][j+1][k][2] * u[i][j+1][k][2]\n\t\t + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2\n\t - C1 * ( u[i][j+1][k][4] * tmp1 ) )\n\t * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1\n\t* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )\n\t - ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )\n\t - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )\n\t - c1345*tmp2*u[i][j+1][k][4] );\n b[i][j][4][1] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )\n\t- dt * ty1\n\t* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];\n b[i][j][4][2] = dt * ty2\n\t* ( C1 * ( u[i][j+1][k][4] * tmp1 )\n\t - 0.50 * C2 \n\t * ( ( u[i][j+1][k][1]*u[i][j+1][k][1]\n\t\t + 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]\n\t\t + u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )\n\t- dt * ty1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];\n b[i][j][4][3] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];\n b[i][j][4][4] = dt * ty2\n\t* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1 * c1345 * tmp1\n\t- dt * ty1 * dy5;\n\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k+1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n c[i][j][0][0] = - dt * tz1 * dz1;\n c[i][j][0][1] = 0.0;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = dt * tz2;\n c[i][j][0][4] = 0.0;\n\n c[i][j][1][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );\n c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * c34 * tmp1\n\t- dt * tz1 * dz2 ;\n c[i][j][1][2] = 0.0;\n c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );\n c[i][j][1][4] = 0.0;\n\n c[i][j][2][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );\n c[i][j][2][1] = 0.0;\n c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * ( c34 * tmp1 )\n\t- dt * tz1 * dz3;\n c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );\n c[i][j][2][4] = 0.0;\n\n c[i][j][3][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )\n\t + 0.50 * C2\n\t * ( ( u[i][j][k+1][1] * u[i][j][k+1][1]\n\t\t + u[i][j][k+1][2] * u[i][j][k+1][2]\n\t\t + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );\n c[i][j][3][1] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );\n c[i][j][3][2] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );\n c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )\n\t* ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * ( r43 * c34 * tmp1 )\n\t- dt * tz1 * dz4;\n c[i][j][3][4] = dt * tz2 * C2;\n\n c[i][j][4][0] = dt * tz2\n\t* 
( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]\n + u[i][j][k+1][2] * u[i][j][k+1][2]\n + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2\n\t - C1 * ( u[i][j][k+1][4] * tmp1 ) )\n\t * ( u[i][j][k+1][3] * tmp1 ) )\n\t- dt * tz1\n\t* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )\n\t - ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )\n\t - c1345 * tmp2 * u[i][j][k+1][4] );\n c[i][j][4][1] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];\n c[i][j][4][2] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];\n c[i][j][4][3] = dt * tz2\n\t* ( C1 * ( u[i][j][k+1][4] * tmp1 )\n - 0.50 * C2\n * ( ( u[i][j][k+1][1]*u[i][j][k+1][1]\n\t\t + u[i][j][k+1][2]*u[i][j][k+1][2]\n\t\t + 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];\n c[i][j][4][4] = dt * tz2\n\t* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )\n\t- dt * tz1 * c1345 * tmp1\n\t- dt * tz1 * dz5;\n }\n }\n} #pragma omp parallel for private(tmp1, tmp2, tmp3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------*/\n int i, j, k, m;\n double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;\n\n for (m = 0; m < 5; m++) {\n sum[m] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)", "context_chars": 100, "text": "um3=0.0, sum4=0.0;\n\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n sum[m] = 0.0;\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n }\n }\n } #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) ", "context_chars": 100, "text": "+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * 
v[i][j][k][4];\n }\n } #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) ", "context_chars": 100, "text": "3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n } #pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "0;\n sum[1] += sum1;\n sum[2] += sum2;\n sum[3] += sum3;\n sum[4] += sum4;\n }\n \n for (m = 0; m < 5; m++) {\n sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "------------------------------*/\n #pragma omp parallel for \n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob, k)", "context_chars": 100, "text": "or\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for private(iglob, k) \n for (j = jbeg; j <= jfin; j++) {\n jglob = j;\n\n k = ki1;\n\n phi1[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n\n k = ki2;\n\n phi2[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n }\n } #pragma omp parallel for private(iglob, k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob, k) ", "context_chars": 100, "text": "\n #pragma omp parallel for private(iglob, k)\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (j = jbeg; j <= 
jfin; j++) {\n jglob = j;\n\n k = ki1;\n\n phi1[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n\n k = ki2;\n\n phi2[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n } #pragma omp parallel for private(iglob, k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:frc1) ", "context_chars": 100, "text": "w2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n }\n }\n\n frc1 = 0.0;\n\n for (i = ibeg; i <= ifin1; i++) {\n #pragma omp parallel for reduction(+:frc1) \n for (j = jbeg; j <= jfin1; j++) {\n frc1 = frc1 + ( phi1[i][j]\n\t\t + phi1[i+1][j]\n\t\t + phi1[i][j+1]\n\t\t + phi1[i+1][j+1]\n\t\t + phi2[i][j]\n\t\t + phi2[i+1][j]\n\t\t + phi2[i][j+1]\n\t\t + phi2[i+1][j+1] );\n }\n } #pragma omp parallel for reduction(+:frc1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:frc1) ", "context_chars": 100, "text": " frc1 = 0.0;\n\n #pragma omp parallel for reduction(+:frc1) \n for (i = ibeg; i <= ifin1; i++) {\n for (j = jbeg; j <= jfin1; j++) {\n frc1 = frc1 + ( phi1[i][j]\n\t\t + phi1[i+1][j]\n\t\t + phi1[i][j+1]\n\t\t + phi1[i+1][j+1]\n\t\t + phi2[i][j]\n\t\t + phi2[i+1][j]\n\t\t + phi2[i][j+1]\n\t\t + phi2[i+1][j+1] );\n } #pragma omp parallel for reduction(+:frc1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------*/\n #pragma omp parallel for \n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob) ", "context_chars": 100, "text": " phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n jglob = jbeg;\n if (jglob == ji1) {\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for \n for (k = ki1; k <= ki2; k++) {\n\tphi1[i][k] = C2*( u[i][jbeg][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jbeg][k][1])\n\t\t\t\t + pow2(u[i][jbeg][k][2])\n\t\t\t\t + pow2(u[i][jbeg][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n }\n } #pragma omp parallel for private(iglob) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", 
"context_chars": 100, "text": "ragma omp parallel for private(iglob) \n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (k = ki1; k <= ki2; k++) {\n\tphi1[i][k] = C2*( u[i][jbeg][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jbeg][k][1])\n\t\t\t\t + pow2(u[i][jbeg][k][2])\n\t\t\t\t + pow2(u[i][jbeg][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob) ", "context_chars": 100, "text": "g][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n }\n }\n }\n\n jglob = jfin;\n if (jglob == ji2) {\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for \n for (k = ki1; k <= ki2; k++) {\n\tphi2[i][k] = C2*( u[i][jfin][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jfin][k][1])\n\t\t\t\t + pow2(u[i][jfin][k][2])\n\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n }\n } #pragma omp parallel for private(iglob) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "pragma omp parallel for private(iglob) \n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (k = ki1; k <= ki2; k++) {\n\tphi2[i][k] = C2*( u[i][jfin][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jfin][k][1])\n\t\t\t\t + pow2(u[i][jfin][k][2])\n\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:frc2) ", "context_chars": 100, "text": "\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n }\n }\n }\n\n\n frc2 = 0.0;\n for (i = ibeg; i <= ifin1; i++) {\n #pragma omp parallel for reduction(+:frc2) \n for (k = ki1; k <= ki2-1; k++) {\n frc2 = frc2 + ( phi1[i][k]\n\t\t + phi1[i+1][k]\n\t\t + phi1[i][k+1]\n\t\t + phi1[i+1][k+1]\n\t\t + phi2[i][k]\n\t\t + phi2[i+1][k]\n\t\t + phi2[i][k+1]\n\t\t + phi2[i+1][k+1] );\n }\n } #pragma omp parallel for reduction(+:frc2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:frc2) ", "context_chars": 100, "text": " frc2 = 0.0;\n #pragma omp parallel for reduction(+:frc2) \n for (i = ibeg; i <= ifin1; i++) {\n for (k = ki1; k <= ki2-1; k++) {\n frc2 = frc2 + ( phi1[i][k]\n\t\t + phi1[i+1][k]\n\t\t + phi1[i][k+1]\n\t\t + phi1[i+1][k+1]\n\t\t + phi2[i][k]\n\t\t + phi2[i+1][k]\n\t\t + phi2[i][k+1]\n\t\t + phi2[i+1][k+1] );\n } #pragma omp parallel for reduction(+:frc2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for firstprivate(i ) \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp parallel for\n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "im1, u51im1;\n double u21jm1, u31jm1, u41jm1, u51jm1;\n double u21km1, u31km1, u41km1, u51km1;\n\n for (i = 0; i <= nx-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= ny-1; j++) {\n #pragma omp parallel for \n for (k = 0; k <= nz-1; k++) {\n #pragma omp parallel for \n \tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "e u21km1, u31km1, u41km1, u51km1;\n\n #pragma omp parallel for \n for (i = 0; i <= nx-1; i++) {\n for (j = 0; j <= ny-1; j++) {\n #pragma omp parallel for \n for (k = 0; k <= nz-1; k++) {\n #pragma omp parallel for \n \tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or (i = 0; i <= nx-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= ny-1; j++) {\n for (k = 0; k <= nz-1; k++) {\n #pragma omp parallel for \n \tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "= 0; j <= ny-1; j++) {\n #pragma omp parallel for \n for (k = 0; k <= nz-1; k++) {\n for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = nx-1;\n\n for (i = L1; i <= L2; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } \n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------*/\n\n L1 = 0;\n L2 = nx-1;\n\n #pragma omp parallel for \n for (i = L1; i <= L2; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or (i = L1; i <= L2; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)", "context_chars": 100, "text": "][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } \n } \n\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) \n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for\n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( 
u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[1][j][k][m] = rsd[1][j][k][m]\n\t - dssp * ( + 5.0 * u[1][j][k][m]\n\t\t - 4.0 * u[2][j][k][m]\n\t\t + u[3][j][k][m] );\n\trsd[2][j][k][m] = rsd[2][j][k][m]\n\t - dssp * ( - 4.0 * u[1][j][k][m]\n\t\t + 6.0 * u[2][j][k][m]\n\t\t - 4.0 * u[3][j][k][m]\n\t\t + u[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n\n for (i = ist1; i <= iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i-2][j][k][m]\n\t\t\t\t - 4.0 * u[i-1][j][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i+1][j][k][m]\n\t\t\t\t + u[i+2][j][k][m] );\n\t}\n }\n\n\n for (m = 0; m < 5; m++) {\n\trsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]\n\t - dssp * ( u[nx-5][j][k][m]\n\t\t\t\t - 4.0 * u[nx-4][j][k][m]\n\t\t\t\t + 6.0 * u[nx-3][j][k][m]\n\t\t\t\t - 4.0 * u[nx-2][j][k][m] );\n\trsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]\n\t - dssp * ( u[nx-4][j][k][m]\n\t\t\t\t - 4.0 * u[nx-3][j][k][m]\n\t\t\t\t + 5.0 * u[nx-2][j][k][m] );\n }\n }\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)", "context_chars": 100, "text": "31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)\n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) \n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) 
)\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for\n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[1][j][k][m] = rsd[1][j][k][m]\n\t - dssp * ( + 5.0 * u[1][j][k][m]\n\t\t - 4.0 * u[2][j][k][m]\n\t\t + u[3][j][k][m] );\n\trsd[2][j][k][m] = rsd[2][j][k][m]\n\t - dssp * ( - 4.0 * u[1][j][k][m]\n\t\t + 6.0 * u[2][j][k][m]\n\t\t - 4.0 * u[3][j][k][m]\n\t\t + u[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n\n for (i = ist1; i <= iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i-2][j][k][m]\n\t\t\t\t - 4.0 * u[i-1][j][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i+1][j][k][m]\n\t\t\t\t + u[i+2][j][k][m] );\n\t}\n }\n\n\n for (m = 0; m < 5; m++) {\n\trsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]\n\t - dssp * ( u[nx-5][j][k][m]\n\t\t\t\t - 4.0 * u[nx-4][j][k][m]\n\t\t\t\t + 6.0 * u[nx-3][j][k][m]\n\t\t\t\t - 4.0 * u[nx-2][j][k][m] );\n\trsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]\n\t - dssp * ( u[nx-4][j][k][m]\n\t\t\t\t - 4.0 * u[nx-3][j][k][m]\n\t\t\t\t + 5.0 * u[nx-2][j][k][m] );\n }\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)\n for (k = 1; k <= nz - 2; k++) {\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "(k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t} #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) ", "context_chars": 100, "text": "k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q)", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = ny-1;\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for private(u31, q)\n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for private(u31, q) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n }\n } 
#pragma omp parallel for private(u31, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q)", "context_chars": 100, "text": "= 0;\n L2 = ny-1;\n\n #pragma omp parallel for private(u31, q)\n for (i = ist; i <= iend; i++) {\n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for private(u31, q) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n } #pragma omp parallel for private(u31, q)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u31, q) ", "context_chars": 100, "text": "<= iend; i++) {\n #pragma omp parallel for private(u31, q)\n for (j = L1; j <= L2; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n } #pragma omp parallel for private(u31, q) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) ", "context_chars": 100, "text": "[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n }\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) \n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp 
parallel for\n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] );\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][1][k][m] = rsd[i][1][k][m]\n\t - dssp * ( + 5.0 * u[i][1][k][m]\n\t\t - 4.0 * u[i][2][k][m]\n\t\t + u[i][3][k][m] );\n\trsd[i][2][k][m] = rsd[i][2][k][m]\n\t - dssp * ( - 4.0 * u[i][1][k][m]\n\t\t + 6.0 * u[i][2][k][m]\n\t\t - 4.0 * u[i][3][k][m]\n\t\t + u[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j-2][k][m]\n\t\t\t\t - 4.0 * u[i][j-1][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j+1][k][m]\n\t\t\t\t + u[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]\n\t - dssp * ( u[i][ny-5][k][m]\n\t\t\t\t - 4.0 * u[i][ny-4][k][m]\n\t\t\t\t + 6.0 * u[i][ny-3][k][m]\n\t\t\t\t - 4.0 * u[i][ny-2][k][m] );\n\trsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]\n\t - dssp * ( u[i][ny-4][k][m]\n\t\t\t\t - 4.0 * u[i][ny-3][k][m]\n\t\t\t\t + 5.0 * u[i][ny-2][k][m] );\n }\n }\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) ", "context_chars": 100, "text": "1j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) \n for (i = ist; i <= iend; i++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - 
u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for\n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] );\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][1][k][m] = rsd[i][1][k][m]\n\t - dssp * ( + 5.0 * u[i][1][k][m]\n\t\t - 4.0 * u[i][2][k][m]\n\t\t + u[i][3][k][m] );\n\trsd[i][2][k][m] = rsd[i][2][k][m]\n\t - dssp * ( - 4.0 * u[i][1][k][m]\n\t\t + 6.0 * u[i][2][k][m]\n\t\t - 4.0 * u[i][3][k][m]\n\t\t + u[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j-2][k][m]\n\t\t\t\t - 4.0 * u[i][j-1][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j+1][k][m]\n\t\t\t\t + u[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]\n\t - dssp * ( u[i][ny-5][k][m]\n\t\t\t\t - 4.0 * u[i][ny-4][k][m]\n\t\t\t\t + 6.0 * u[i][ny-3][k][m]\n\t\t\t\t - 4.0 * u[i][ny-2][k][m] );\n\trsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]\n\t - dssp * ( u[i][ny-4][k][m]\n\t\t\t\t - 4.0 * u[i][ny-3][k][m]\n\t\t\t\t + 5.0 * u[i][ny-2][k][m] );\n }\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "1j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) \n for (k = 1; k <= nz - 2; k++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t} #pragma 
omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1) ", "context_chars": 100, "text": "[k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ".0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] );\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1, u41, q) ", "context_chars": 100, "text": "direction flux differences\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for private(u41, q) \n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = u[i][j][k][3];\n\tu41 = u[i][j][k][3] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u41;\n\tflux[i][j][k][2] = u[i][j][k][2] * u41; \n\tflux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * 
(u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n }\n\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n\n #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1) \n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21k = tmp * u[i][j][k][1];\n\tu31k = tmp * u[i][j][k][2];\n\tu41k = tmp * u[i][j][k][3];\n\tu51k = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j][k-1][0];\n\n\tu21km1 = tmp * u[i][j][k-1][1];\n\tu31km1 = tmp * u[i][j][k-1][2];\n\tu41km1 = tmp * u[i][j][k-1][3];\n\tu51km1 = tmp * u[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )\n\t\t - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )\n\t + (1.0/6.0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dz1 * tz1 * ( u[i][j][k-1][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j][k+1][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( u[i][j][k-1][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j][k+1][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( u[i][j][k-1][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j][k+1][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( u[i][j][k-1][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j][k+1][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( u[i][j][k-1][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j][k+1][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][j][1][m] = rsd[i][j][1][m]\n\t - dssp * ( + 5.0 * u[i][j][1][m]\n\t\t - 4.0 * u[i][j][2][m]\n\t\t + u[i][j][3][m] );\n\trsd[i][j][2][m] = rsd[i][j][2][m]\n\t - dssp * ( - 4.0 * u[i][j][1][m]\n\t\t + 6.0 * u[i][j][2][m]\n\t\t - 4.0 * u[i][j][3][m]\n\t\t + u[i][j][4][m] );\n }\n\n #pragma omp parallel for \n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]\n\t - dssp * ( u[i][j][nz-5][m]\n\t\t\t\t - 4.0 * u[i][j][nz-4][m]\n\t\t\t\t + 6.0 * u[i][j][nz-3][m]\n\t\t\t\t - 4.0 * u[i][j][nz-2][m] );\n\trsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]\n\t - dssp * ( u[i][j][nz-4][m]\n\t\t\t\t - 4.0 * u[i][j][nz-3][m]\n\t\t\t\t + 5.0 * u[i][j][nz-2][m] );\n }\n }\n } #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1, u41, q) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(u41, q) ", "context_chars": 100, "text": "41km1, u51km1, u41, q) \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = u[i][j][k][3];\n\tu41 = u[i][j][k][3] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u41;\n\tflux[i][j][k][2] = u[i][j][k][2] * u41; \n\tflux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n } #pragma omp parallel for private(u41, q) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+ C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " - C2 * q ) * u41;\n }\n\n #pragma omp parallel for \n for (k = 1; k <= nz - 2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1) ", "context_chars": 100, "text": "][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n\n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21k = tmp * u[i][j][k][1];\n\tu31k = tmp * u[i][j][k][2];\n\tu41k = tmp * u[i][j][k][3];\n\tu51k = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j][k-1][0];\n\n\tu21km1 = tmp * u[i][j][k-1][1];\n\tu31km1 = tmp * u[i][j][k-1][2];\n\tu41km1 = tmp * u[i][j][k-1][3];\n\tu51km1 = tmp * u[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )\n\t\t - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )\n\t + (1.0/6.0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n } #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ".0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dz1 * tz1 * ( 
u[i][j][k-1][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j][k+1][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( u[i][j][k-1][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j][k+1][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( u[i][j][k-1][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j][k+1][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( u[i][j][k-1][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j][k+1][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( u[i][j][k-1][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j][k+1][4] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "6.0 * u[i][j][2][m]\n\t\t - 4.0 * u[i][j][3][m]\n\t\t + u[i][j][4][m] );\n }\n\n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " u[i][j][4][m] );\n }\n\n #pragma omp parallel for \n for (k = 3; k <= nz - 4; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob)", "context_chars": 100, "text": "g the top and bottom faces\n--------------------------------------------------------------------*/\n for (i = 0; i < nx; i++) {\n iglob = i;\n for (j = 0; j < ny; j++) {\n jglob = j;\n exact( iglob, jglob, 0, &u[i][j][0][0] );\n exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );\n }\n } #pragma omp parallel for private(iglob)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob)", "context_chars": 100, "text": "long north and south faces\n--------------------------------------------------------------------*/\n for (i = 0; i < nx; i++) {\n iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, 0, k, &u[i][0][k][0] );\n }\n } #pragma omp parallel for private(iglob)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(iglob)", "context_chars": 100, "text": " iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, 0, k, &u[i][0][k][0] );\n }\n }\n\n for (i = 0; i < nx; i++) 
{\n iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );\n }\n } #pragma omp parallel for private(iglob)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(jglob)", "context_chars": 100, "text": " along east and west faces\n--------------------------------------------------------------------*/\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 0; k < nz; k++) {\n exact( 0, jglob, k, &u[0][j][k][0] );\n }\n } #pragma omp parallel for private(jglob)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(jglob)", "context_chars": 100, "text": " jglob = j;\n for (k = 0; k < nz; k++) {\n exact( 0, jglob, k, &u[0][j][k][0] );\n }\n }\n\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 0; k < nz; k++) {\n exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );\n }\n } #pragma omp parallel for private(jglob)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(jglob, zeta, eta, iglob, xi, pxi, peta, pzeta)", "context_chars": 100, "text": "i, peta, pzeta;\n double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],\n ue_iny0k[5],ue_ij1[5],ue_ijnz[5];\n\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 1; k < nz - 1; k++) {\n zeta = ((double)k) / (nz-1);\n if (jglob != 0 && jglob != ny0-1) {\n\teta = ( (double) (jglob) ) / (ny0-1);\n\tfor (i = 0; i < nx; i++) {\n\t iglob = i;\n\t if(iglob != 0 && iglob != nx0-1) {\n\t xi = ( (double) (iglob) ) / (nx0-1);\n\t exact (0,jglob,k,ue_1jk);\n\t exact (nx0-1,jglob,k,ue_nx0jk);\n\t exact (iglob,0,k,ue_i1k);\n\t exact (iglob,ny0-1,k,ue_iny0k);\n\t exact (iglob,jglob,0,ue_ij1);\n\t exact (iglob,jglob,nz-1,ue_ijnz);\n\t #pragma omp parallel for private(pxi, peta, pzeta) \n\t for (m = 0; m < 5; m++) {\n\t pxi = ( 1.0 - xi ) * ue_1jk[m]\n\t\t+ xi * ue_nx0jk[m];\n\t peta = ( 1.0 - eta ) * ue_i1k[m]\n\t\t+ eta * ue_iny0k[m];\n\t pzeta = ( 1.0 - zeta ) * ue_ij1[m]\n\t\t+ zeta * ue_ijnz[m];\n\n\t u[i][j][k][m] = pxi + peta + pzeta\n\t\t- pxi * peta - peta * pzeta - pzeta * pxi\n\t\t+ pxi * peta * pzeta;\n\t }\n\t }\n\t}\n }\n }\n } #pragma omp parallel for private(jglob, zeta, eta, iglob, xi, pxi, peta, pzeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(pxi, peta, pzeta) ", "context_chars": 100, "text": "b,ny0-1,k,ue_iny0k);\n\t exact (iglob,jglob,0,ue_ij1);\n\t exact (iglob,jglob,nz-1,ue_ijnz);\n\t for (m = 0; m < 5; m++) {\n\t pxi = ( 1.0 - xi ) * ue_1jk[m]\n\t\t+ xi * ue_nx0jk[m];\n\t peta = ( 1.0 - eta ) * ue_i1k[m]\n\t\t+ eta * ue_iny0k[m];\n\t pzeta = ( 1.0 - zeta ) * ue_ij1[m]\n\t\t+ zeta * ue_ijnz[m];\n\n\t u[i][j][k][m] = pxi + peta + pzeta\n\t\t- pxi * peta - peta * pzeta - pzeta * pxi\n\t\t+ pxi * peta * pzeta;\n\t } #pragma omp parallel for private(pxi, peta, pzeta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "e, before 
timestepping).\n--------------------------------------------------------------------*/\n{\n for (i = 0; i < ISIZ1; i++) {\n #pragma omp parallel for \n for (j = 0; j < ISIZ2; j++) {\n #pragma omp parallel for \n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------*/\n{\n #pragma omp parallel for \n for (i = 0; i < ISIZ1; i++) {\n for (j = 0; j < ISIZ2; j++) {\n #pragma omp parallel for \n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or (i = 0; i < ISIZ1; i++) {\n #pragma omp parallel for \n for (j = 0; j < ISIZ2; j++) {\n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " for (j = 0; j < ISIZ2; j++) {\n #pragma omp parallel for \n for (k = 0; k < 5; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " perform SSOR iteration\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------------------------*/\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "(i = ist; i <= iend; i++) {\n #pragma omp parallel for \n 
for (j = jst; j <= jend; j++) {\n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= nz - 2; k++) {\n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "c update the variables\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= nz-2; k++) {\n\t #pragma omp parallel for\n\t for (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m]\n\t + tmp * rsd[i][j][k][m];\n\t }\n\t}\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "--------------------------*/\n #pragma omp parallel for\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= nz-2; k++) {\n\t #pragma omp parallel for\n\t for (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m]\n\t + tmp * rsd[i][j][k][m];\n\t }\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r (i = ist; i <= iend; i++) {\n #pragma omp parallel for\n for (j = jst; j <= jend; j++) {\n\tfor (k = 1; k <= nz-2; k++) {\n\t #pragma omp parallel for\n\t for (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m]\n\t + tmp * rsd[i][j][k][m];\n\t }\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "r\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= nz-2; k++) {\n\t for (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m]\n\t + tmp * rsd[i][j][k][m];\n\t } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------------*/\n epsilon = 1.0e-08;\n\n *class = 'U';\n *verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "he known reference 
values.\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": "space\nc-------------------------------------------------------------------*/\n\n int i, j, k;\n\n for (k = 0; k < d[2]; k++) {\n\tfor (j = 0; j < d[1]; j++) {\n for (i = 0; i < d[0]; i++) {\n\t crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);\n\t }\n\t}\n } #pragma omp parallel for private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "006 format(' WARNING: compiled for ', i5, ' processes. ',\n > ' Will not verify. ')*/\n\n for (i = 0;i < 3 ; i++) {\n\tdims[i][0] = NX;\n\tdims[i][1] = NY;\n\tdims[i][2] = NZ;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "or \n for (i = 0;i < 3 ; i++) {\n\tdims[i][0] = NX;\n\tdims[i][1] = NY;\n\tdims[i][2] = NZ;\n }\n\n\n for (i = 0; i < 3; i++) {\n\txstart[i] = 1;\n\txend[i] = NX;\n\tystart[i] = 1;\n yend[i] = NY;\n zstart[i] = 1;\n zend[i] = NZ;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) ", "context_chars": 100, "text": "c mod(i-1+n/2, n) - n/2\nc-------------------------------------------------------------------*/\n\n for (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\t#pragma omp parallel for private(j) firstprivate(k ,ii ,ii2 ,jj ,ij2 ,kk ,i ) \n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n #pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i ) \n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(k ,ii ,ii2 ,jj ,ij2 ,kk ,i ) ", "context_chars": 100, "text": "k ) \n for (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n #pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i ) \n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t} #pragma omp parallel for private(j) firstprivate(k ,ii ,ii2 ,jj ,ij2 ,kk ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i ) ", "context_chars": 100, "text": " j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t } #pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, jj;\n\n for (i = 0; i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,jj ) ", "context_chars": 100, "text": "{\n\tlogd[i] = ilog2(d[i]);\n }\n\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (k = 0; k < d[2]; k++) {\n\tfor (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for private(j) firstprivate(i ,jj ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\t#pragma omp parallel for private(i) firstprivate(jj ,j ,k ) \n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t}\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n \n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n cfftz (is, logd[0],\n\t\t d[0], y0, y1);\n\t \n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for private(j) firstprivate(fftblock ,i ,jj ,x ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\t\t xout[k][j+jj][i].real = y0[i][j].real;\n\t\t xout[k][j+jj][i].imag = y0[i][j].imag;\n\t\t}\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,jj ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(i ,jj ,k ) ", "context_chars": 100, "text": "ock; jj+=fftblock) {\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < fftblock; j++) {\n\t\t#pragma omp parallel for private(i) firstprivate(jj ,j ,k ) \n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t}\n\t } #pragma omp parallel for private(j) firstprivate(i ,jj ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(jj ,j ,k ) ", "context_chars": 100, "text": "omp parallel for private(j) firstprivate(i ,jj ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t} #pragma omp parallel for private(i) 
firstprivate(jj ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(fftblock ,i ,jj ,x ,k ) ", "context_chars": 100, "text": "r_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\t\t xout[k][j+jj][i].real = y0[i][j].real;\n\t\t xout[k][j+jj][i].imag = y0[i][j].imag;\n\t\t}\n\t } #pragma omp parallel for private(j) firstprivate(fftblock ,i ,jj ,x ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, ii;\n\n for (i = 0; i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ) ", "context_chars": 100, "text": " {\n\tlogd[i] = ilog2(d[i]);\n }\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (k = 0; k < d[2]; k++) {\n for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) \n\t for (j = 0; j < d[1]; j++) {\n\t\t#pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t }\n/*\t if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n\t cfftz (is, logd[1], \n\t\t d[1], y0, y1);\n \n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) \n for (j = 0; j < d[1]; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[j][i].real;\n\t\t xout[k][j][i+ii].imag = y0[j][i].imag;\n\t }\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) ", "context_chars": 100, "text": "d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t for (j = 0; j < d[1]; j++) {\n\t\t#pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t } #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k ) ", "context_chars": 100, "text": "p parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) \n\t for (j = 0; 
j < d[1]; j++) {\n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t} #pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) ", "context_chars": 100, "text": "er_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < d[1]; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[j][i].real;\n\t\t xout[k][j][i+ii].imag = y0[j][i].imag;\n\t }\n\t } #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, ii;\n\n for (i = 0;i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ) ", "context_chars": 100, "text": " {\n\tlogd[i] = ilog2(d[i]);\n }\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (j = 0; j < d[1]; j++) {\n for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t #pragma omp parallel for private(k) firstprivate(i ,ii ,j ) \n\t for (k = 0; k < d[2]; k++) {\n\t\t#pragma omp parallel for private(i) firstprivate(ii ,k ,j ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t }\n\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n cfftz (is, logd[2],\n\t\t d[2], y0, y1);\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for private(k) firstprivate(i ,ii ,x ,fftblock ,j ) \n for (k = 0; k < d[2]; k++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[k][i].real;\n\t\t xout[k][j][i+ii].imag = y0[k][i].imag;\n\t }\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(k) firstprivate(i ,ii ,j ) ", "context_chars": 100, "text": "d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t for (k = 0; k < d[2]; k++) {\n\t\t#pragma omp parallel for private(i) firstprivate(ii ,k ,j ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t } #pragma omp parallel for private(k) firstprivate(i ,ii ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(ii ,k ,j ) ", 
"context_chars": 100, "text": " #pragma omp parallel for private(k) firstprivate(i ,ii ,j ) \n\t for (k = 0; k < d[2]; k++) {\n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t} #pragma omp parallel for private(i) firstprivate(ii ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(k) firstprivate(i ,ii ,x ,fftblock ,j ) ", "context_chars": 100, "text": "r_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (k = 0; k < d[2]; k++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[k][i].real;\n\t\t xout[k][j][i+ii].imag = y0[k][i].imag;\n\t }\n\t } #pragma omp parallel for private(k) firstprivate(i ,ii ,x ,fftblock ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(fftblock ,j ) ", "context_chars": 100, "text": "-------------------------------------------*/\n if (m % 2 == 1) {\n\tfor (j = 0; j < n; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\tx[j][i].real = y[j][i].real;\n\t\tx[j][i].imag = y[j][i].imag;\n\t } #pragma omp parallel for private(i) firstprivate(fftblock ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " int j, q,r,s, ierr;\n dcomplex chk,allchk;\n \n chk.real = 0.0;\n chk.imag = 0.0;\n\n\n for (j = 1; j <= 1024; j++) {\n\tq = j%NX+1;\n\tif (q >= xstart[0] && q <= xend[0]) {\n r = (3*j)%NY+1;\n if (r >= ystart[0] && r <= yend[0]) {\n\t\ts = (5*j)%NZ+1;\n\t\tif (s >= zstart[0] && s <= zend[0]) {\n\t\t cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);\n\t\t}\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " of update to the vector u\nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-------------------------------------*/\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp 
parallel for", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[0]-2; i++) {\n #pragma omp parallel for\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-------------------------------*/\n\n int i, j, k, m, d;\n double xi, eta, zeta, u_exact[5], add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(add) ", "context_chars": 100, "text": " k <= grid_points[2]-1; k++) {\n\tzeta = (double)k * dnzm1;\n\texact_solution(xi, eta, zeta, u_exact);\n\tfor (m = 0; m < 5; m++) {\n\t add = u[m][i][j][k] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "; m++) {\n\t add = u[m][i][j][k] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "rms[m] + add*add;\n\t}\n }\n }\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------------------------------------*/\n\n int i, j, k, d, m;\n double add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ";\n double add;\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n }\n\n //for (i = 0; i <= grid_points[0]-2; i++) {\n //#pragma omp parallel for \n for (j = 0; j <= grid_points[1]-2; j++) {\n //#pragma omp parallel for \n for (k = 0; k <= grid_points[2]-2; k++) 
{\n\t#pragma omp parallel for private(add)\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " rms[m] = 0.0;\n }\n\n //#pragma omp parallel for \n for (i = 0; i <= grid_points[0]-2; i++) {\n //for (j = 0; j <= grid_points[1]-2; j++) {\n //#pragma omp parallel for \n for (k = 0; k <= grid_points[2]-2; k++) {\n\t#pragma omp parallel for private(add)\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0]-2; i++) {\n //#pragma omp parallel for \n for (j = 0; j <= grid_points[1]-2; j++) {\n //for (k = 0; k <= grid_points[2]-2; k++) {\n\t#pragma omp parallel for private(add)\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(add)", "context_chars": 100, "text": "nts[1]-2; j++) {\n //#pragma omp parallel for \n for (k = 0; k <= grid_points[2]-2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " rms[m] + add*add;\n\t}\n }\n }\n }\n\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", 
"context_chars": 100, "text": "------------------------------------*/\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= grid_points[1]-1; j++) {\n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; i <= grid_points[0]-1; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][i] = dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][i] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][i] = dtpp * dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(im1, ip1) ", "context_chars": 100, "text": "[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i]\n\t\t + buf[3][i]*ue[3][i]);\n }\n \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t tx2*( ue[1][ip1]-ue[1][im1] )+\n\t dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-\n (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+\n\t xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+\n\t dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+\n\t xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+\n\t dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);\n \n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+\n\t xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+\n\t dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-\n\t\t buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+\n\t 
0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+\n\t\t buf[0][im1])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+\n\t dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);\n } #pragma omp parallel for private(im1, ip1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(i) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);\n\ti = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][i-1] + 6.0*ue[m][i] -\n \t 4.0*ue[m][i+1] + ue[m][i+2]);\n } #pragma omp parallel for private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][i-1] + 6.0*ue[m][i] -\n \t 4.0*ue[m][i+1] + ue[m][i+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (i = 3; i <= grid_points[0]-4; i++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[i+1] + ue[m][i+2]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (i = 3; i <= grid_points[0]-4; i++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "(ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1]);\n\ti = grid_points[0]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);\n } #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0; j <= grid_points[1]-1; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][j] = dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][j] = dtemp[m];\n\t}\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][j] = dtpp * dtemp[m];\n\t} #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(jm1, jp1) ", "context_chars": 100, "text": "];\n\tq[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +\n\t\t buf[3][j]*ue[3][j]);\n }\n\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t ty2*( ue[2][jp1]-ue[2][jm1] )+\n\t dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+\n\t yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+\n\t dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-\n\t\t (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+\n\t yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+\n\t dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);\n\n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+\n\t yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+\n\t dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-\n\t\t buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+\n\t\t buf[0][jm1])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+\n\t dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);\n } #pragma omp parallel for private(jm1, jp1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(j) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);\n\tj = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][j-1] + 6.0*ue[m][j] -\n\t 4.0*ue[m][j+1] + ue[m][j+2]);\n } #pragma omp parallel for private(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][j-1] + 6.0*ue[m][j] -\n\t 4.0*ue[m][j+1] + ue[m][j+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n \tfor (j = 3; j <= grid_points[1]-4; j++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "j+1] + ue[m][j+2]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (j = 3; j <= grid_points[1]-4; j++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp 
parallel for private(j) ", "context_chars": 100, "text": "(ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1]);\n\tj = grid_points[1]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);\n\n } #pragma omp parallel for private(j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "; k <= grid_points[2]-1; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][k] = dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ma omp parallel for \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][k] = dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][k] = dtpp * dtemp[m];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(km1, kp1) ", "context_chars": 100, "text": "];\n\tq[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +\n\t\t buf[3][k]*ue[3][k]);\n }\n\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t tz2*( ue[3][kp1]-ue[3][km1] )+\n\t dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+\n\t zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+\n\t dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+\n\t zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+\n\t dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);\n\n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-\n\t\t (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+\n\t zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+\n\t dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-\n\t\t buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]\n\t\t +buf[0][km1])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+\n\t dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);\n } #pragma omp parallel for private(km1, kp1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);\n\tk = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][k-1] + 6.0*ue[m][k] -\n\t 
4.0*ue[m][k+1] + ue[m][k+2]);\n } #pragma omp parallel for private(k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][k-1] + 6.0*ue[m][k] -\n\t 4.0*ue[m][k+1] + ue[m][k+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for \n\tfor (k = 3; k <= grid_points[2]-4; k++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+1] + ue[m][k+2]);\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n\tfor (k = 3; k <= grid_points[2]-4; k++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k) ", "context_chars": 100, "text": "(ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1]);\n\tk = grid_points[2]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);\n } #pragma omp parallel for private(k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " of the forcing function, \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------------*/\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; 
k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "xi, eta, zeta \nc-------------------------------------------------------------------*/\n\n int m;\n\n for (m = 0; m < 5; m++) {\n dtemp[m] = ce[0][m] +\n xi*(ce[1][m] + xi*(ce[4][m] + \n\t\t\t xi*(ce[7][m] + xi*ce[10][m]))) +\n eta*(ce[2][m] + eta*(ce[5][m] + \n\t\t\t eta*(ce[8][m] + eta*ce[11][m])))+\n zeta*(ce[3][m] + zeta*(ce[6][m] +\n\t\t\t\t zeta*(ce[9][m] + \n\t\t\t\t zeta*ce[12][m])));\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ng the whole thing here. \nc-------------------------------------------------------------------*/\n\n for (i = 0; i <= IMAX-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= IMAX-1; j++) {\n #pragma omp parallel for \n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------------*/\n\n #pragma omp parallel for \n for (i = 0; i <= IMAX-1; i++) {\n for (j = 0; j <= IMAX-1; j++) {\n #pragma omp parallel for \n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "(i = 0; i <= IMAX-1; i++) {\n #pragma omp parallel for \n for (j = 0; j <= IMAX-1; j++) {\n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(Pxi ,Peta ,Pzeta) ", "context_chars": 100, "text": "\tfor (iz = 0; iz < 2; iz++) {\n\t exact_solution(xi, eta, (double)iz, \n\t\t\t &Pface[iz][2][0]);\n\t}\n\n\tfor (m = 0; m < 5; m++) {\n\t Pxi = xi * Pface[1][0][m] + \n\t (1.0-xi) * Pface[0][0][m];\n\t Peta = eta * Pface[1][1][m] + \n\t (1.0-eta) * Pface[0][1][m];\n\t Pzeta = zeta * Pface[1][2][m] + \n\t (1.0-zeta) * Pface[0][2][m];\n \n\t 
u[m][i][j][k] = Pxi + Peta + Pzeta - \n\t Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + \n\t Pxi*Peta*Pzeta;\n\n\t} #pragma omp parallel for private(Pxi ,Peta ,Pzeta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "d_points[1]; j++) {\n eta = (double)j * dnym1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "eft hand side for starters\nc-------------------------------------------------------------------*/\n for (n = 0; n < 15; n++) {\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------------------------------*/\n #pragma omp parallel for \n for (n = 0; n < 15; n++) {\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": 
"#pragma omp parallel for ", "context_chars": 100, "text": "= 0; n < 15; n++) {\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "id_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ll, but \nc convenient\nc-------------------------------------------------------------------*/\n for (n = 0; n < 3; n++) {\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------------------------------------*/\n #pragma omp parallel for \n for (n = 0; n < 3; n++) {\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " = 0; n < 3; n++) {\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "id_points[0]; i++) {\n #pragma omp parallel for \n for (j = 0; j < grid_points[1]; j++) {\n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1)", "context_chars": 100, "text": "-*/\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n\tru1 = c3c4*rho_i[i][j][k];\n\tcv[i] = us[i][j][k];\n\trhon[i] = max(dx2+con43*ru1, \n\t\t max(dx5+c1c5*ru1,\n\t\t\t max(dxmax+ru1,\n\t\t\t dx1)));\n } #pragma omp parallel for private(ru1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "= max(dx2+con43*ru1, \n\t\t max(dx5+c1c5*ru1,\n\t\t\t max(dxmax+ru1,\n\t\t\t dx1)));\n }\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tlhs[0][i][j][k] = 0.0;\n\tlhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1];\n\tlhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];\n\tlhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];\n\tlhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------*/\n\n i = 1;\n #pragma omp for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;\n lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;\n lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;\n lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][k] = lhs[3][i+1][j][k] - comz4;\n lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;\n }\n }\n\n for (i = 3; i <= grid_points[0]-4; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[k] + comz1;\n }\n }\n\n #pragma omp parallel for \n for (i = 3; i <= grid_points[0]-4; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-4; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n }\n\n i = grid_points[0]-3;\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;\n lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;\n lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " i = grid_points[0]-3;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;\n lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;\n lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ing to \nc the first \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttx2 * 
speed[i+1][j][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nts[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1)", "context_chars": 100, "text": "-*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\tru1 = c3c4*rho_i[i][j][k];\n\tcv[j] = vs[i][j][k];\n\trhoq[j] = max(dy3 + con43 * ru1,\n\t\t max(dy5 + c1c5*ru1,\n\t\t\t max(dymax + ru1,\n\t\t\t dy1)));\n } #pragma omp parallel for private(ru1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " ru1,\n\t\t max(dy5 + c1c5*ru1,\n\t\t\t max(dymax + ru1,\n\t\t\t dy1)));\n }\n \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tlhs[0][i][j][k] = 0.0;\n\tlhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];\n\tlhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];\n\tlhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];\n\tlhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n j = 1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n \n lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;\n lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;\n lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;\n lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------*/\n\n j = 1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n \n lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - 
comz4;\n lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;\n lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;\n lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][k] = lhs[3][i][j+1][k] - comz4;\n lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;\n }\n }\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 3; j <= grid_points[1]-4; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[k] + comz1;\n }\n }\n\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 3; j <= grid_points[1]-4; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-2; i++) {\n #pragma omp parallel for \n for (j = 3; j <= grid_points[1]-4; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n j = grid_points[1]-3;\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;\n lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;\n lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "actors \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = 
lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nts[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1)", "context_chars": 100, "text": "-*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-1; k++) {\n\tru1 = c3c4*rho_i[i][j][k];\n\tcv[k] = ws[i][j][k];\n\trhos[k] = max(dz4 + con43 * ru1,\n\t\t max(dz5 + c1c5 * ru1,\n\t\t\t max(dzmax + ru1,\n\t\t\t dz1)));\n } #pragma omp parallel for private(ru1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " + con43 * ru1,\n\t\t max(dz5 + c1c5 * ru1,\n\t\t\t max(dzmax + ru1,\n\t\t\t dz1)));\n }\n\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = 0.0;\n\tlhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];\n\tlhs[2][i][j][k] = 1.0 + c2dttz1 * 
rhos[k];\n\tlhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];\n\tlhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n k = 1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;\n lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;\n lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----------*/\n\n k = 1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;\n lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;\n lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "k+1] = lhs[3][i][j][k+1] - comz4;\n lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;\n }\n }\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 3; k <= grid_points[2]-4; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+1] + comz1;\n }\n }\n\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 3; k <= grid_points[2]-4; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nts[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 3; k <= grid_points[2]-4; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + 
comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n }\n\n k = grid_points[2]-3;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " k = grid_points[2]-3;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ther factors (u+c), (u-c) \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+5][i][j][k] = 
lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nts[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) ", "context_chars": 100, "text": "------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) ", "context_chars": 100, "text": " parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) ", "context_chars": 100, "text": "rallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 
1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) ", "context_chars": 100, "text": "-----------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )", "context_chars": 100, "text": " parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )", "context_chars": 100, "text": "rallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv ,aux)", "context_chars": 100, "text": " and the speed of sound. 
\nc-------------------------------------------------------------------*/\n\n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for private(rho_inv ,aux)\n for (j = 0; j <= grid_points[1]-1; j++) {\n #pragma omp parallel for private(rho_inv ,aux)\n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n }\n }\n } #pragma omp parallel for private(rho_inv ,aux)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv ,aux)", "context_chars": 100, "text": "/\n\n #pragma omp parallel for private(rho_inv ,aux)\n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n #pragma omp parallel for private(rho_inv ,aux)\n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n }\n } #pragma omp parallel for private(rho_inv ,aux)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(rho_inv ,aux)", "context_chars": 100, "text": " #pragma omp parallel for private(rho_inv ,aux)\n for (j = 0; j <= grid_points[1]-1; j++) {\n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n } #pragma omp parallel for private(rho_inv ,aux)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "oundary \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for\n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------------------*/\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for\n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for\n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t}\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "points[0]-1; i++) {\n #pragma omp parallel for\n for (j = 0; j <= grid_points[1]-1; j++) {\n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1)", "context_chars": 100, "text": "mpute xi-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t 
u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n }\n } #pragma omp parallel for private(uijk, up1, um1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1)", "context_chars": 100, "text": "\n #pragma omp parallel for private(uijk, up1, um1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(uijk, up1, um1)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n } #pragma omp parallel for private(uijk, up1, um1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(uijk, up1, um1)", "context_chars": 100, "text": "pragma omp parallel for private(uijk, up1, um1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * 
(u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n } #pragma omp parallel for private(uijk, up1, um1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "n \nc-------------------------------------------------------------------*/\n\n i = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------*/\n\n i = 1;\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n }\n }\n i = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 
1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+2][j][k]);\n }\n }\n }\n i = 2;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+ u[m][i+2][j][k]);\n }\n }\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "< 5; m++) {\n #pragma omp parallel for \n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n } #pragma omp 
parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[0]-3*1-1; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "- 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n }\n }\n\n i = grid_points[0]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n i = grid_points[0]-3;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "[k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n }\n }\n\n i = grid_points[0]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n i = grid_points[0]-2;\n #pragma omp parallel for\n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 
4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1)", "context_chars": 100, "text": "pute eta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(vijk, vp1, vm1) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(vijk, vp1, vm1) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n }\n } #pragma omp parallel for private(vijk, vp1, vm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1) ", "context_chars": 100, "text": "\n #pragma omp parallel for private(vijk, vp1, vm1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(vijk, vp1, vm1) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] 
= rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n } #pragma omp parallel for private(vijk, vp1, vm1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(vijk, vp1, vm1) ", "context_chars": 100, "text": "ragma omp parallel for private(vijk, vp1, vm1) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n } #pragma omp parallel for private(vijk, vp1, vm1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", 
"omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ipation \nc-------------------------------------------------------------------*/\n\n j = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------*/\n\n j = 1;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n }\n }\n\n j = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j+2][k]);\n }\n }\n }\n\n j = 2;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+ u[m][i][j+2][k]);\n }\n }\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "]-2; i++) {\n #pragma omp parallel for \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n }\n }\n \n j = grid_points[1]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp 
parallel for ", "context_chars": 100, "text": " }\n }\n }\n \n j = grid_points[1]-3;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n }\n }\n\n j = grid_points[1]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n j = grid_points[1]-2;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1)", "context_chars": 100, "text": "ute zeta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(wijk, wp1, wm1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(wijk, wp1, wm1)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - 
u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n }\n } #pragma omp parallel for private(wijk, wp1, wm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1)", "context_chars": 100, "text": "/\n #pragma omp parallel for private(wijk, wp1, wm1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(wijk, wp1, wm1)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n } #pragma omp parallel for private(wijk, wp1, wm1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(wijk, wp1, wm1)", "context_chars": 100, "text": "pragma omp parallel for private(wijk, wp1, wm1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n } #pragma omp parallel for private(wijk, wp1, wm1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n k = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------------*/\n\n k = 1;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n 
for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n }\n }\n\n k = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k+2]);\n }\n }\n }\n\n k = 2;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "+ u[m][i][j][k+2]);\n }\n }\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n }\n }\n \n k = grid_points[2]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n \n k = grid_points[2]-3;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n }\n }\n\n k = grid_points[2]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp 
*\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n }\n\n k = grid_points[2]-2;\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ".0*u[m][i][j][k] );\n }\n }\n }\n\n #pragma omp parallel for \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ints[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t} #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)", "context_chars": 100, "text": "---*/\n\n int i, j, k;\n double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, \n r4, r5, ac2inv;\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)\n for (j = 1; j <= grid_points[1]-2; j++) {\n#pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tru1 = rho_i[i][j][k];\n\tuu = us[i][j][k];\n\tvv = vs[i][j][k];\n\tww = ws[i][j][k];\n\tac = speed[i][j][k];\n\tac2inv = ainv[i][j][k]*ainv[i][j][k];\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - \n\t\t\t vv*r3 - ww*r4 + r5 );\n\tt2 = bt * ru1 * ( uu * r1 - r2 );\n\tt3 = ( bt * ru1 * ac ) * t1;\n\n\trhs[0][i][j][k] = r1 - t1;\n\trhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );\n\trhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );\n\trhs[3][i][j][k] = - t2 + t3;\n\trhs[4][i][j][k] = t2 + t3;\n }\n }\n } #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)", "context_chars": 100, "text": "uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n#pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tru1 = rho_i[i][j][k];\n\tuu = us[i][j][k];\n\tvv = vs[i][j][k];\n\tww = ws[i][j][k];\n\tac = speed[i][j][k];\n\tac2inv = ainv[i][j][k]*ainv[i][j][k];\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - \n\t\t\t vv*r3 - ww*r4 + r5 );\n\tt2 = bt * ru1 * ( uu * r1 - r2 );\n\tt3 = ( bt * ru1 * ac ) * t1;\n\n\trhs[0][i][j][k] = r1 - t1;\n\trhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );\n\trhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );\n\trhs[3][i][j][k] = - t2 + t3;\n\trhs[4][i][j][k] = t2 + t3;\n }\n } #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)", "context_chars": 100, "text": ", uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)\n for (j = 1; j <= grid_points[1]-2; j++) {\nfor (k = 1; k <= grid_points[2]-2; k++) {\n\n\tru1 = rho_i[i][j][k];\n\tuu = us[i][j][k];\n\tvv = vs[i][j][k];\n\tww = ws[i][j][k];\n\tac = speed[i][j][k];\n\tac2inv = ainv[i][j][k]*ainv[i][j][k];\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - \n\t\t\t vv*r3 - ww*r4 + r5 );\n\tt2 = bt * ru1 * ( uu * r1 - r2 );\n\tt3 = ( bt * ru1 * ac ) * 
t1;\n\n\trhs[0][i][j][k] = r1 - t1;\n\trhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );\n\trhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );\n\trhs[3][i][j][k] = - t2 + t3;\n\trhs[4][i][j][k] = t2 + t3;\n } #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )", "context_chars": 100, "text": "vel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\txvel = us[i][j][k];\n\tyvel = vs[i][j][k];\n\tzvel = ws[i][j][k];\n\tac = speed[i][j][k];\n\tacinv = ainv[i][j][k];\n\n\tac2u = ac*ac;\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tuzik1 = u[0][i][j][k];\n\tbtuz = bt * uzik1;\n\n\tt1 = btuz*acinv * (r4 + r5);\n\tt2 = r3 + t1;\n\tt3 = btuz * (r4 - r5);\n\n\trhs[0][i][j][k] = t2;\n\trhs[1][i][j][k] = -uzik1*r2 + xvel*t2;\n\trhs[2][i][j][k] = uzik1*r1 + yvel*t2;\n\trhs[3][i][j][k] = zvel*t2 + t3;\n\trhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) + \n\t qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;\n }\n } #pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )", "context_chars": 100, "text": " ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\txvel = us[i][j][k];\n\tyvel = vs[i][j][k];\n\tzvel = ws[i][j][k];\n\tac = speed[i][j][k];\n\tacinv = ainv[i][j][k];\n\n\tac2u = ac*ac;\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tuzik1 = u[0][i][j][k];\n\tbtuz = bt * uzik1;\n\n\tt1 = btuz*acinv * (r4 + r5);\n\tt2 = r3 + t1;\n\tt3 = btuz * (r4 - r5);\n\n\trhs[0][i][j][k] = t2;\n\trhs[1][i][j][k] = -uzik1*r2 + xvel*t2;\n\trhs[2][i][j][k] = uzik1*r1 + yvel*t2;\n\trhs[3][i][j][k] = zvel*t2 + t3;\n\trhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) + \n\t qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;\n } #pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n\n rhs_norm(xcr);\n\n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " \n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n }\n\n *class = 'U';\n 
*verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "he known reference values.\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, m) ", "context_chars": 100, "text": "------*/\n n = 0;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(fac1, m) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for private(fac1, m) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, m) ", "context_chars": 100, "text": "\n #pragma omp parallel for private(fac1, m) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for private(fac1, m) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(m, fac1, fac2) ", "context_chars": 100, "text": "--------------------------------------------*/\n\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for 
private(m, fac1, fac2) \n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1.0/lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i1][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for private(m, fac1, fac2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(m, fac1, fac2) ", "context_chars": 100, "text": "\n #pragma omp parallel for private(m, fac1, fac2) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1.0/lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i1][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private(m, fac1, fac2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, i1, i2, fac1) ", "context_chars": 100, "text": " factors \n--------------------------------------------------------------------*/\n\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for private(n, i1, i2, fac1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows 
separately\n--------------------------------------------------------------------*/\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n \n #pragma omp parallel for private(fac1, fac2)\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i1][j][k];\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n\n }\n }\n } #pragma omp parallel for private(n, i1, i2, fac1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, i1, i2, fac1)", "context_chars": 100, "text": "m-3+1)*5;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for private(n, i1, i2, fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, fac2)", "context_chars": 100, "text": "----------------------------------*/\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i1][j][k];\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n\n }\n } #pragma omp parallel for private(fac1, fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", 
"context_chars": 100, "text": "-----*/\n\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n #pragma omp parallel for private(n)\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(n) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for private(n) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n)", "context_chars": 100, "text": "1][j][k];\n }\n }\n }\n\n #pragma omp parallel for private(n) \n for (m = 3; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for private(n) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private(n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n) ", "context_chars": 100, "text": " m++) {\n #pragma omp parallel for private(n)\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n } #pragma omp parallel for private(n) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------*/\n n = 0;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n for (m = 0; m < 3; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for 
\n for (m = 0; m < 3; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, i1, i2)", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n }\n }\n } #pragma omp parallel for private(n, i1, i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m-3+1)*5;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1)", "context_chars": 100, "text": "hsy();\n\n n = 0;\n\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(fac1)\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for private(fac1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1)", "context_chars": 100, "text": "+ 2;\n #pragma omp parallel for private(fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for private(fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, fac2)", "context_chars": 100, "text": "--------------------------------------------*/\n\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(fac1, fac2)\n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j1][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private(fac1, fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, fac2)", "context_chars": 100, "text": "]-1;\n #pragma omp parallel for private(fac1, fac2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 
1./lhs[n+2][i][j1][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private(fac1, fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, j1, j2, fac1) ", "context_chars": 100, "text": "c factors \n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n #pragma omp parallel for private(n, j1, j2, fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows separately\n--------------------------------------------------------------------*/\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n #pragma omp parallel for private(fac1, fac2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j1][k];\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private(n, j1, j2, fac1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, j1, j2, fac1)", "context_chars": 100, "text": "m-3+1)*5;\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] 
-\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for private(n, j1, j2, fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, fac2)", "context_chars": 100, "text": "---------------------------------------*/\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j1][k];\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private(fac1, fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n #pragma omp parallel for private(n)\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(n) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private(n) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n)", "context_chars": 100, "text": "][j1][k];\n }\n }\n }\n\n #pragma omp parallel for private(n) \n for (m = 3; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(n) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = 
rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private(n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n) ", "context_chars": 100, "text": " m++) {\n #pragma omp parallel for private(n)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n } #pragma omp parallel for private(n) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(j1, j2)", "context_chars": 100, "text": "rst three factors\n--------------------------------------------------------------------*/\n n = 0;\n for (m = 0; m < 3; m++) {\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j + 2;\n #pragma omp parallel for private(j1, j2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n\t#pragma omp parallel for private(j1, j2) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n }\n }\n } #pragma omp parallel for private(j1, j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(j1, j2)", "context_chars": 100, "text": "3; m++) {\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\t#pragma omp parallel for private(j1, j2) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n } #pragma omp parallel for private(j1, j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(j1, j2) ", "context_chars": 100, "text": " 2;\n #pragma omp parallel for private(j1, j2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t} #pragma omp parallel for private(j1, j2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, j1, j2) ", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j1 + 1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n }\n }\n } #pragma omp parallel for private(n, j1, j2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel 
for ", "context_chars": 100, "text": "m-3+1)*5;\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j1 + 1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2, fac1)", "context_chars": 100, "text": "ON \nc-------------------------------------------------------------------*/\n\n lhsz();\n\n n = 0;\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2, fac1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for private(k1, k2, fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2, fac1)", "context_chars": 100, "text": ";\n\n #pragma omp parallel for private(k1, k2, fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for private(k1, k2, fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1)", "context_chars": 100, "text": "---------------------------------------------*/\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(fac1) \n for (j = 1; j <= grid_points[1]-2; j++) {\n fac1 
= 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j][k1];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for private(fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1) ", "context_chars": 100, "text": "ints[2]-1;\n #pragma omp parallel for private(fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j][k1];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for private(fac1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, k1, k2, fac1) ", "context_chars": 100, "text": "u-c factors \nc-------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for private(k1, k2, fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2, fac1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows separately\nc-------------------------------------------------------------------*/\n k = 
grid_points[2]-2;\n k1 = grid_points[2]-1;\n #pragma omp parallel for private(fac1, fac2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j][k1];\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n\n }\n }\n } #pragma omp parallel for private(n, k1, k2, fac1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2, fac1)", "context_chars": 100, "text": "ragma omp parallel for private(n, k1, k2, fac1) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2, fac1)\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for private(k1, k2, fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2, fac1)", "context_chars": 100, "text": " #pragma omp parallel for private(k1, k2, fac1)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for private(k1, k2, fac1)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(fac1, fac2)", "context_chars": 100, "text": "---------------------------------------*/\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j][k1];\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n\n }\n } #pragma omp parallel for private(fac1, fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for private(n) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for private(n) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2) ", "context_chars": 100, "text": "rst three factors\nc-------------------------------------------------------------------*/\n n = 0;\n for (m = 0; m < 3; m++) {\n #pragma omp parallel for private(k1, k2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2)\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n }\n } #pragma omp parallel for private(k1, k2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2)", "context_chars": 100, "text": "------------*/\n n = 0;\n #pragma omp parallel for private(k1, k2) \n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2)\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n } #pragma omp parallel for private(k1, k2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2)", "context_chars": 100, "text": " {\n #pragma omp parallel for private(k1, k2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n } #pragma omp parallel for private(k1, k2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(n, k1, k2) ", "context_chars": 100, "text": " And the remaining two\nc-------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for private(k1, k2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n }\n } #pragma omp parallel for private(n, k1, k2) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2)", "context_chars": 100, "text": "/\n #pragma omp parallel for private(n, k1, k2) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for private(k1, k2) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n } #pragma omp parallel for private(k1, k2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(k1, k2) ", "context_chars": 100, "text": "5;\n #pragma omp parallel for private(k1, k2)\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n } #pragma omp parallel for private(k1, k2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "uctured OpenMP C version\"\n\t \" - EP Benchmark\\n\");\n sprintf(size, \"%12.0f\", pow(2.0, M+1));\n for (j = 13; j >= 1; j--) {\n\tif (size[j] == '.') size[j] = ' ';\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "e.\n*/\n vranlc(0, &(dum[0]), dum[1], &(dum[2]));\n dum[0] = randlc(&(dum[1]), dum[2]);\n \n for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;\n \n Mops = log(sqrt(fabs(max(1.0, 1.0))));\n\n timer_clear(1);\n timer_clear(2);\n timer_clear(3);\n timer_start(1);\n\n vranlc(0, &t1, A, x);\n\n/* Compute AN = A ^ (2 * NK) (mod 2^46). */\n\n t1 = A;\n\n for ( i = 1; i <= MK+1; i++) {\n\tt2 = randlc(&t1, t1);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "2 = randlc(&t1, t1);\n }\n\n an = t1;\n tt = S;\n gc = 0.0;\n sx = 0.0;\n sy = 0.0;\n\n for ( i = 0; i <= NQ - 1; i++) {\n\tq[i] = 0.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " t2, t3, t4, x1, x2;\n int kk, i, ik, l;\n double qq[NQ];\t\t/* private copy of q[0:NQ-1] */\n\n for (i = 0; i < NQ; i++) qq[i] = 0.0;\n\n //#pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) \n for (k = 1; k <= np; k++) {\n\tkk = k_offset + k;\n\tt1 = S;\n\tt2 = an;\n\n/* Find starting seed t1 for this kk. 
*/\n\n\tfor (i = 1; i <= 100; i++) {\n ik = kk / 2;\n if (2 * ik != kk) t3 = randlc(&t1, t2);\n if (ik == 0) break;\n t3 = randlc(&t2, t2);\n kk = ik;\n\t}\n\n/* Compute uniform pseudorandom numbers. */\n\n\tif (TIMERS_ENABLED == TRUE) timer_start(3);\n\tvranlc(2*NK, &t1, A, x-1);\n\tif (TIMERS_ENABLED == TRUE) timer_stop(3);\n\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. This loop is not \nc vectorizable.\n*/\n\tif (TIMERS_ENABLED == TRUE) timer_start(2);\n\n\tfor ( i = 0; i < NK; i++) {\n x1 = 2.0 * x[2*i] - 1.0;\n x2 = 2.0 * x[2*i+1] - 1.0;\n t1 = pow2(x1) + pow2(x2);\n if (t1 <= 1.0) {\n\t\tt2 = sqrt(-2.0 * log(t1) / t1);\n\t\tt3 = (x1 * t2);\t\t\t\t/* Xi */\n\t\tt4 = (x2 * t2);\t\t\t\t/* Yi */\n\t\tl = max(fabs(t3), fabs(t4));\n\t\tqq[l] += 1.0;\t\t\t\t/* counts */\n\t\tsx = sx + t3;\t\t\t\t/* sum of Xi */\n\t\tsy = sy + t4;\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) ", "context_chars": 100, "text": "opy of q[0:NQ-1] */\n\n #pragma omp parallel for \n for (i = 0; i < NQ; i++) qq[i] = 0.0;\n\n //for (k = 1; k <= np; k++) {\n\tkk = k_offset + k;\n\tt1 = S;\n\tt2 = an;\n\n/* Find starting seed t1 for this kk. */\n\n\tfor (i = 1; i <= 100; i++) {\n ik = kk / 2;\n if (2 * ik != kk) t3 = randlc(&t1, t2);\n if (ik == 0) break;\n t3 = randlc(&t2, t2);\n kk = ik;\n\t}\n\n/* Compute uniform pseudorandom numbers. */\n\n\tif (TIMERS_ENABLED == TRUE) timer_start(3);\n\tvranlc(2*NK, &t1, A, x-1);\n\tif (TIMERS_ENABLED == TRUE) timer_stop(3);\n\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. 
This loop is not \nc vectorizable.\n*/\n\tif (TIMERS_ENABLED == TRUE) timer_start(2);\n\n\tfor ( i = 0; i < NK; i++) {\n x1 = 2.0 * x[2*i] - 1.0;\n x2 = 2.0 * x[2*i+1] - 1.0;\n t1 = pow2(x1) + pow2(x2);\n if (t1 <= 1.0) {\n\t\tt2 = sqrt(-2.0 * log(t1) / t1);\n\t\tt3 = (x1 * t2);\t\t\t\t/* Xi */\n\t\tt4 = (x2 * t2);\t\t\t\t/* Yi */\n\t\tl = max(fabs(t3), fabs(t4));\n\t\tqq[l] += 1.0;\t\t\t\t/* counts */\n\t\tsx = sx + t3;\t\t\t\t/* sum of Xi */\n\t\tsy = sy + t4;\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n } #pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ";\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n }\n {\n for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];\n }\n//#if defined(_OPENMP)\n// nthreads = omp_get_num_threads();\n// /* _OPENMP */ \n} /* end of parallel region */ \n\n #pragma omp parallel for private(i ) reduction(+:gc) \n for (i = 0; i <= NQ-1; i++) {\n gc = gc + q[i];\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:gc) ", "context_chars": 100, "text": "threads = omp_get_num_threads();\n//#endif /* _OPENMP */ \n} /* end of parallel region */ \n\n for (i = 0; i <= NQ-1; i++) {\n gc = gc + q[i];\n } #pragma omp parallel for private(i ) reduction(+:gc) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "LT_DEFAULT;\n\tnit = NIT_DEFAULT;\n\tnx[lt] = NX_DEFAULT;\n\tny[lt] = NY_DEFAULT;\n\tnz[lt] = NZ_DEFAULT;\n\n\tfor (i = 0; i <= 7; i++) {\n\t debug_vec[i] = DEBUG_DEFAULT;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(lt ) ", "context_chars": 100, "text": "r ( k = lt-1; k >= 1; k--) {\n\tnx[k] = nx[k+1]/2;\n\tny[k] = ny[k+1]/2;\n\tnz[k] = nz[k+1]/2;\n }\n\n for (k = 1; k <= lt; k++) {\n\tm1[k] = nx[k]+2;\n\tm2[k] = nz[k]+2;\n\tm3[k] = ny[k]+2;\n } #pragma omp parallel for firstprivate(lt ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,r1 ,r2 ) ", "context_chars": 100, "text": "-----------------------------------------------*/\n\n int i3, i2, i1;\n double r1[M], r2[M];\n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n1 ,r ,i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n #pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i2 ,i3 ) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( 
r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ,r1 ,r2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n1 ,r ,i2 ,i3 ) ", "context_chars": 100, "text": ",i2 ,i3 ,r1 ,r2 ) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t } #pragma omp parallel for firstprivate(n1 ,r ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i2 ,i3 ) ", "context_chars": 100, "text": "-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t } #pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,u1 ,u2 ) ", "context_chars": 100, "text": "-----------------------------------------------*/\n\n int i3, i2, i1;\n double u1[M], u2[M];\n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n1 ,u ,i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t }\n\t for (i1 = 1; i1 < n1-1; i1++) {\n\t\tr[i3][i2][i1] = v[i3][i2][i1]\n\t\t - a[0] * u[i3][i2][i1]\n/*--------------------------------------------------------------------\nc Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)\nc---------------------------------------------------------------------\nc > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)\nc > + u1(i1) )\nc-------------------------------------------------------------------*/\n\t\t- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )\n\t\t - a[3] * ( u2[i1-1] + u2[i1+1] );\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ,u1 ,u2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for 
firstprivate(n1 ,u ,i2 ,i3 ) ", "context_chars": 100, "text": ",i2 ,i3 ,u1 ,u2 ) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t } #pragma omp parallel for firstprivate(n1 ,u ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(j1 ,j2 ,i1 ,i2 ,i3 ,x1 ,y1 ,x2 ,y2 ) ", "context_chars": 100, "text": "{\n d2 = 1;\n }\n\n if (m3k == 3) {\n d3 = 2;\n } else {\n d3 = 1;\n }\n for (j3 = 1; j3 < m3j-1; j3++) {\n\ti3 = 2*j3-d3;\n/*C i3 = 2*j3-1*/\n\tfor (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n #pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j2 ,j3 ) \n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t }\n\n for (j1 = 1; j1 < m1j-1; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\ty2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]\n\t\t + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];\n\t\tx2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]\n\t\t + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];\n\t\ts[j3][j2][j1] =\n\t\t 0.5 * r[i3+1][i2+1][i1+1]\n\t\t + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)\n\t\t + 0.125 * ( x1[i1] + x1[i1+2] + y2)\n\t\t + 0.0625 * ( y1[i1] + y1[i1+2] );\n\t }\n\t}\n } #pragma omp parallel for private(j1 ,j2 ,i1 ,i2 ,i3 ,x1 ,y1 ,x2 ,y2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j2 ,j3 ) ", "context_chars": 100, "text": "for (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t } #pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j2 ,j3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,z1 ,z2 ,z3 ) ", "context_chars": 100, "text": " parameter( m=535 )\n*/\n double z1[M], z2[M], z3[M];\n\n if ( n1 != 3 && n2 != 3 && n3 != 3 ) {\n\tfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\t#pragma omp parallel for firstprivate(mm1 ,z ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]\n\t\t\t+z[i3][i2][i1];\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = 
u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2 ,z1 ,z2 ,z3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,z ,i2 ,i3 ) ", "context_chars": 100, "text": ",i2 ,z1 ,z2 ,z3 ) \n\tfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t} #pragma omp parallel for firstprivate(mm1 ,z ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) ", "context_chars": 100, "text": "\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) ", "context_chars": 100, "text": "[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) ", "context_chars": 100, "text": "[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2) ", "context_chars": 100, "text": " d3 = 2;\n t3 = 1;\n\t} else {\n d3 
= 1;\n t3 = 0;\n\t}\n \n {\n\tfor ( i3 = d3; i3 <= mm3-1; i3++) {\n for ( i2 = d2; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]\n\t\t\t+z[i3-1][i2-1][i1-1];\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]\n\t\t\t+0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n for ( i2 = 1; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]\n\t\t\t+0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]\n\t\t\t+0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2) ", "context_chars": 100, "text": "z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t}\n\tfor ( i3 = 1; i3 <= mm3-1; i3++) {\n for ( i2 = d2; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]\n\t\t\t+0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]\n\t\t\t+0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1]\n\t\t\t +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t for ( i2 = 1; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]\n\t\t\t+0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]\n\t\t\t +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]\n\t\t\t+0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]\n\t\t\t\t+z[i3][i2][i1-1]+z[i3][i2-1][i1-1]\n\t\t\t\t+z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t\t+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,a ) reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": "\n\n double s = 0.0;\n int i3, i2, i1, n;\n double a = 0.0, tmp = 0.0;\n\n n = nx*ny*nz;\n\n for (i3 = 1; i3 < n3-1; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,a ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,r ,n1 ,n2 ,i3 ) 
reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": "allel for private(i1 ,i2 ,a ) reduction(+:tmp) reduction(+:s) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i2 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": " ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t } #pragma omp parallel for firstprivate(n3 ,i2 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "--------------------------------------------------*/\n\n int i1, i2, i3;\n\n /* axis = 1 */\n{\n for ( i3 = 1; i3 < n3-1; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,u ,n1 ,n2 ,i3 ) \n\tfor ( i2 = 1; i2 < n2-1; i2++) {\n\t u[i3][i2][n1-1] = u[i3][i2][1];\n\t u[i3][i2][0] = u[i3][i2][n1-2];\n\t}\n// } #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,u ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "s = 1 */\n{\n #pragma omp parallel for private(i1 ,i2 ,i3 ) \n for ( i3 = 1; i3 < n3-1; i3++) {\n\tfor ( i2 = 1; i2 < n2-1; i2++) {\n\t u[i3][i2][n1-1] = u[i3][i2][1];\n\t u[i3][i2][0] = u[i3][i2][n1-2];\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,u ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i2 ,u ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "i2][n1-2];\n\t}\n// }\n\n /* axis = 2 */\n//#pragma omp for\n// for ( i3 = 1; i3 < n3-1; i3++) {\n\tfor ( i1 = 0; i1 < n1; i1++) {\n\t u[i3][n2-1][i1] = u[i3][1][i1];\n\t u[i3][0][i1] = u[i3][n2-2][i1];\n\t} #pragma omp parallel for firstprivate(n3 ,i2 ,u ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i3 ) ", "context_chars": 100, "text": "i3][n2-1][i1] = u[i3][1][i1];\n\t u[i3][0][i1] = u[i3][n2-2][i1];\n\t}\n }\n\n /* axis = 3 */\n for ( i2 = 0; i2 < n2; i2++) {\n\tfor ( i1 = 0; i1 < n1; i1++) {\n\t u[n3-1][i2][i1] = u[1][i2][i1];\n\t u[0][i2][i1] = u[n3-2][i2][i1];\n\t}\n } #pragma omp parallel for private(i1 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": 
"#pragma omp parallel for ", "context_chars": 100, "text": "ks for twenty candidates\nc-------------------------------------------------------------------*/\n for (i = 0; i < MM; i++) {\n\tten[i][1] = 0.0;\n\tj1[i][1] = 0;\n\tj2[i][1] = 0;\n\tj3[i][1] = 0;\n\tten[i][0] = 1.0;\n\tj1[i][0] = 0;\n\tj2[i][0] = 0;\n\tj3[i][0] = 0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i2 ,i1 ) ", "context_chars": 100, "text": "n\");\n for (i = MM-1; i >= 0; i--) {\n\tprintf(\" %4d\", jg[0][i][1]);\n }\n printf(\"\\n\");*/\n\n for (i3 = 0; i3 < n3; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i2 ,i1 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": " printf(\"\\n\");*/\n\n #pragma omp parallel for private(i2 ,i1 ) \n for (i3 = 0; i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i2 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(n3 ,i1 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t } #pragma omp parallel for firstprivate(n3 ,i2 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "---\nc-------------------------------------------------------------------*/\n\n int i1, i2, i3;\n for (i3 = 0;i3 < n3; i3++) {\n\t#pragma omp parallel for firstprivate(i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i3 ) ", "context_chars": 100, "text": "nt i1, i2, i3;\n #pragma omp parallel for private(i1 ,i2 ,i3 ) \n for (i3 = 0;i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t} #pragma omp parallel for firstprivate(i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i2 ,i3 ) ", "context_chars": 100, "text": "n3; i3++) {\n\t#pragma omp parallel for 
firstprivate(i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t } #pragma omp parallel for firstprivate(i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:j) ", "context_chars": 100, "text": "2[i];\n\n\n/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */\n\n j = 0;\n for( i=1; i key_array[i] )\n j++;\n\n\n if( j != 0 )\n {\n printf( \"Full_verify: number of keys out of sort: %d\\n\",\n j );\n } #pragma omp parallel for private(i ) reduction(+:j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "est keys are, load into */\n/* top of array bucket_size */\n for( i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " */\n for( i=0; ifor( i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_threading/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "le timecounter, maxtime;\n\n\n\n/* Initialize the verification arrays if a valid class */\n for( i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c1,c2) firstprivate (n)", "context_chars": 100, "text": "00][500],double B[500][500])\n{\n//int i;\n//int j;\n{\n int c1;\n int c2;\n if (n >= 1) {\n \nfor (c1 = 0; c1 <= n + -1; c1 += 1) {\n \n#pragma omp parallel for private (c2)\n for (c2 = 0; c2 <= n + -1; c2 += 1) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private (c1,c2) firstprivate (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c2)", "context_chars": 100, "text": " parallel for private (c1,c2) firstprivate (n)\n for (c1 = 0; c1 <= n + -1; c1 += 1) {\n \nfor (c2 = 0; c2 <= n + -1; c2 += 1) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n } #pragma omp parallel for private (c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c2,c8)", "context_chars": 100, "text": "2;\n//#pragma scop\n{\n int c0;\n int c2;\n int c8;\n for (c0 = 0; c0 <= 9; c0 += 1) {\n \nfor (c2 = 0; c2 <= 499; c2 += 1) {\n for (c8 = 1; c8 <= 499; c8 += 1) {\n B[c2][c8] = B[c2][c8] - A[c2][c8] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 1; c8 <= 499; c8 += 1) {\n X[c2][c8] = X[c2][c8] - 
X[c2][c8 - 1] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 0; c8 <= 497; c8 += 1) {\n X[c2][500 - c8 - 2] = (X[c2][500 - 2 - c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n } #pragma omp parallel for private (c2,c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c2)", "context_chars": 100, "text": "8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n }\n \nfor (c2 = 0; c2 <= 499; c2 += 1) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n } #pragma omp parallel for private (c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c2,c8)", "context_chars": 100, "text": "= 0; c2 <= 499; c2 += 1) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n }\n \nfor (c2 = 0; c2 <= 499; c2 += 1) {\n for (c8 = 1; c8 <= 499; c8 += 1) {\n B[c8][c2] = B[c8][c2] - A[c8][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 1; c8 <= 499; c8 += 1) {\n X[c8][c2] = X[c8][c2] - X[c8 - 1][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 0; c8 <= 497; c8 += 1) {\n X[500 - 2 - c8][c2] = (X[500 - 2 - c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n } #pragma omp parallel for private (c2,c8)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private (c2)", "context_chars": 100, "text": "8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n }\n \nfor (c2 = 0; c2 <= 499; c2 += 1) {\n X[500 - 1][c2] = X[500 - 1][c2] / B[500 - 1][c2];\n } #pragma omp parallel for private (c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB009-lastprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) lastprivate (x) firstprivate (len)", "context_chars": 100, "text": "dio.h>\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int x;\n int len = 10000;\n \nfor (i = 0; i <= len - 1; i += 1) {\n x = i;\n } #pragma omp parallel for private (i) lastprivate (x) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "gv[])\n{\n int i;\n int j;\n float temp;\n float sum = 0.0;\n int len = 100;\n float u[100][100];\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "t u[100][100];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 0.5;\n } #pragma omp parallel for private (j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j) reduction (+:sum) firstprivate (len)", "context_chars": 100, "text": " parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 0.5;\n }\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (temp,j) reduction (+:sum)\n for (j = 0; j <= len - 1; j += 1) {\n temp = u[i][j];\n sum = sum + temp * temp;\n }\n } #pragma omp parallel for private (temp,i,j) reduction (+:sum) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,j) reduction (+:sum)", "context_chars": 100, "text": " private (temp,i,j) reduction (+:sum) firstprivate (len)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,j) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "outLen = 0;\n if (argc > 1) \n inLen = atoi(argv[1]);\n int input[inLen];\n int output[inLen];\n \nfor (i = 0; i <= inLen - 1; i += 1) {\n input[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (len)", "context_chars": 100, "text": "if (argc > 1) \n len = atoi(argv[1]);\n int numNodes = len;\n int numNodes2 = 0;\n int x[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n if (i % 2 == 0) \n x[i] = 5;\n else \n x[i] = - 5;\n } #pragma omp parallel for private (i) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (-:numNodes2)", "context_chars": 100, "text": "0; i <= len - 1; i += 1) {\n if (i % 2 == 0) \n x[i] = 5;\n else \n x[i] = - 5;\n }\n \nfor (i = numNodes - 1; i >= 0; i += -1) {\n if (x[i] <= 0) {\n numNodes2--;\n }\n } #pragma omp parallel for private (i) reduction (-:numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "float sum = 0.0;\n int len = 100;\n if (argc > 1) \n len = atoi(argv[1]);\n float u[len][len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "t u[len][len];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 
0.5;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j) reduction (+:sum) firstprivate (len)", "context_chars": 100, "text": " parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n u[i][j] = 0.5;\n }\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (temp,j) reduction (+:sum)\n for (j = 0; j <= len - 1; j += 1) {\n temp = u[i][j];\n sum = sum + temp * temp;\n }\n } #pragma omp parallel for private (temp,i,j) reduction (+:sum) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,j) reduction (+:sum)", "context_chars": 100, "text": " private (temp,i,j) reduction (+:sum) firstprivate (len)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,j) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "int main(int argc,char *argv[])\n{\n int i;\n int j;\n double a[20][20];\n memset(a,0,sizeof(a));\n \nfor (i = 0; i <= 19; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 19; j += 1) {\n a[i][j] += (i + j) + 0.1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "set(a,0,sizeof(a));\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 19; i += 1) {\n \nfor (j = 0; j <= 19; j += 1) {\n a[i][j] += (i + j) + 0.1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " j <= 19; j += 1) {\n a[i][j] += (i + j) + 0.1;\n }\n }\n for (i = 0; i <= 18; i += 1) {\n \nfor (j = 0; j <= 19; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB071-targetparallelfor-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "*/\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 1000;\n int a[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB071-targetparallelfor-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = a[i] + 1;\n } #pragma omp parallel for private (i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB034-truedeplinear-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ar *argv[])\n{\n int i;\n int len = 2000;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB109-orderedmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:x)", "context_chars": 100, "text": "d clause\n * Data race pair: x@56:5 vs. x@56:5\n * */\n#include \n\nint main()\n{\n int x = 0;\n \nfor (int i = 0; i <= 99; i += 1) {\n x++;\n } #pragma omp parallel for reduction (+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ouble *xa1 = base;\n double *xa2 = xa1 + 12;\n int i;\n// initialize segments touched by indexSet\n \nfor (i = 521; i <= 2025; i += 1) {\n base[i] = 0.5 * i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB002-antidep1-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ar *argv[])\n{\n int i;\n int len = 1000;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "ivate. 
\n*/\n#include \n#include \nint a[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "gma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "][j] = i;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB110-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction (+:x)", "context_chars": 100, "text": "f ordered directive and clause, no data races\n * */\n#include \n\nint main()\n{\n int x = 0;\n \nfor (int i = 0; i <= 99; i += 1) {\n x++;\n } #pragma omp parallel for reduction (+:x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n,m)", "context_chars": 100, "text": " \nint n = 100;\nint m = 100;\ndouble b[100][100];\n\nint init()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i * j);\n }\n } #pragma omp parallel for private (i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j) firstprivate (n,m)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i * j);\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n,m)", "context_chars": 100, "text": "1) {\n b[i][j] = (i * j);\n }\n }\n return 0;\n}\n\nvoid foo(int n,int m)\n{\n int i;\n int j;\n \nfor (i = 0; i <= n - 1; i += 1) {\n// Be careful about bounds of j\n for (j = 1; j <= m - 1; 
j += 1) {\n b[i][j] = b[i][j - 1];\n }\n } #pragma omp parallel for private (i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB059-lastprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) lastprivate (x)", "context_chars": 100, "text": "thin the last iteration.\n*/\n#include \n#include \n\nvoid foo()\n{\n int i;\n int x;\n \nfor (i = 0; i <= 99; i += 1) {\n x = i;\n } #pragma omp parallel for private (i) lastprivate (x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "000][1000];\n\nint main(int argc,char *argv[])\n{\n int i;\n int j;\n int n = 1000;\n int m = 1000;\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "\n int m = 1000;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n,m)", "context_chars": 100, "text": "arallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n }\n }\n \nfor (i = 0; i <= n - 1; i += 1) {\n for (j = 1; j <= m - 1; j += 1) {\n b[i][j] = b[i][j - 1];\n }\n } #pragma omp parallel for private (i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB114-if-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ".h>\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 100;\n int a[100];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB001-antidep1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ">\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 1000;\n int a[1000];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB024-simdtruedep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "omp.h> \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 100;\n int a[100];\n int b[100];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n b[i] = i + 1;\n } #pragma omp parallel for private (i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum0)", "context_chars": 100, "text": " \nint sum0 = 0;\nint sum1 = 0;\n\nint main()\n{\n int len = 1000;\n int i;\n int sum = 0;\n{\n \nfor (i = 0; i <= len - 1; i += 1) {\n sum0 = sum0 + i;\n } #pragma omp parallel for private (i) reduction (+:sum0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len)", "context_chars": 100, "text": "- 1; i += 1) {\n sum0 = sum0 + i;\n }\n }\n sum = sum + sum0;\n/* reference calculation */\n \nfor (i = 0; i <= len - 1; i += 1) {\n sum1 = sum1 + i;\n } #pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "e pair.\n*/\n#include \n#include \nint a[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] += 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " = i + j;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] += 1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "include \n\nint main()\n{\n int i;\n int j;\n int n = 100;\n int m = 100;\n double b[n][m];\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= n - 1; j += 1) {\n b[i][j] = ((double )(i * j));\n }\n } #pragma omp parallel for private (i,j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " double b[n][m];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= n - 1; j += 1) {\n b[i][j] = ((double )(i * j));\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "1; j += 1) {\n b[i][j] = ((double )(i * j));\n }\n }\n for (i = 1; i <= n - 1; i += 1) {\n \nfor (j = 1; j <= m - 1; j += 1) {\n b[i][j] = b[i - 1][j - 1];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\nint input[1000];\nint output[1000];\n\nint main()\n{\n int i;\n int inLen = 1000;\n int outLen = 0;\n \nfor (i = 0; i <= inLen - 1; i += 1) {\n input[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "um = 0.0;\n double sum2 = 0.0;\n double a[len];\n double b[len];\n/*Initialize with some values*/\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = ((double )i) / 2.0;\n b[i] = ((double )i) / 3.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,i2) reduction (+:sum)", "context_chars": 100, "text": "i = 0; i <= len - 1; i += 1) {\n a[i] = ((double )i) / 2.0;\n b[i] = ((double )i) / 3.0;\n }\n \nfor (i2 = 0; i2 <= len - 1; i2 += 256) {\n \n#pragma omp parallel for private (i) reduction (+:sum)\n for (i = i2; i <= ((i2 + 256 < len?i2 + 256 : len)) - 1; i += 1) {\n sum += a[i] * b[i];\n }\n } #pragma omp parallel for private (i,i2) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum)", "context_chars": 100, "text": "a omp parallel for private (i,i2) reduction (+:sum)\n for (i2 = 0; i2 <= len - 1; i2 += 256) {\n \nfor (i = i2; i <= ((i2 + 256 < len?i2 + 256 : len)) - 1; i += 1) {\n sum += a[i] * b[i];\n } #pragma omp parallel for private (i) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum2) firstprivate (len)", "context_chars": 100, "text": "+ 256 : len)) - 1; i += 1) {\n sum += a[i] * b[i];\n }\n }\n/* CPU reference computation */\n \nfor (i = 0; i <= len - 1; i += 1) {\n sum2 += a[i] * b[i];\n } #pragma omp parallel for private (i) reduction (+:sum2) firstprivate (len)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB084-threadprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum1)", "context_chars": 100, "text": "or (i = 1; i <= 1000; i += 1) {\n foo(i);\n }\n sum = sum + sum0;\n/* reference calculation */\n \nfor (i = 1; i <= 1000; i += 1) {\n sum1 = sum1 + i;\n } #pragma omp parallel for private (i) reduction (+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ain(int argc,char *argv[])\n{\n int i;\n int ret;\n FILE *pfile;\n int len = 1000;\n int A[1000];\n \nfor (i = 0; i <= len - 1; i += 1) {\n A[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 1000;\n double a[len];\n double b[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = ((double )i) / 2.0;\n b[i] = 0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "0;\n if (argc > 1) \n len = atoi(argv[1]);\n int n = len;\n int m = len;\n double b[len][len];\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "ble b[len][len];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n }\n }\n for (i = 1; i <= n - 1; i += 1) {\n \nfor (j = 1; j <= m - 1; j += 1) {\n b[i][j] = b[i - 1][j - 1];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum0)", "context_chars": 100, "text": "0 = 0;\nint sum1 = 0;\n//#pragma omp threadprivate(sum0)\n\nint main()\n{\n int i;\n int sum = 0;\n{\n \nfor (i = 1; i <= 1000; i += 1) {\n sum0 = sum0 + i;\n } #pragma omp parallel for private (i) reduction (+:sum0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction 
(+:sum1)", "context_chars": 100, "text": "000; i += 1) {\n sum0 = sum0 + i;\n }\n }\n sum = sum + sum0;\n/* reference calculation */\n \nfor (i = 1; i <= 1000; i += 1) {\n sum1 = sum1 + i;\n } #pragma omp parallel for private (i) reduction (+:sum1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " \ndouble a[100][100];\ndouble v[100];\ndouble v_out[100];\n\nint init()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = (i * j) + 0.01;\n }\n v_out[i] = (i * j) + 0.01;\n v[i] = (i * j) + 0.01;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": ";\n int j;\n int k;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = (i * j) + 0.01;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "[i] = (i * j) + 0.01;\n v[i] = (i * j) + 0.01;\n }\n return 0;\n}\n\nint mv()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n double sum = 0.0;\n \n#pragma omp parallel for private (j) reduction (+:sum)\n for (j = 0; j <= 99; j += 1) {\n sum += a[i][j] * v[j];\n }\n v_out[i] = sum;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:sum)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n double sum = 0.0;\n \nfor (j = 0; j <= 99; j += 1) {\n sum += a[i][j] * v[j];\n } #pragma omp parallel for private (j) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB026-targetparallelfor-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "/\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 1000;\n int a[1000];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "ition. 
\n*/\n#include \n#include \nint a[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB010-lastprivatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) lastprivate (x) firstprivate (len)", "context_chars": 100, "text": "c,char *argv[])\n{\n int i;\n int x;\n int len = 10000;\n if (argc > 1) \n len = atoi(argv[1]);\n \nfor (i = 0; i <= len - 1; i += 1) {\n x = i;\n } #pragma omp parallel for private (i) lastprivate (x) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (x,i) reduction (+:pi)", "context_chars": 100, "text": ";\n long i;\n double x;\n double interval_width;\n interval_width = 1.0 / ((double )2000000000);\n \nfor (i = 0; i <= ((long )2000000000) - 1; i += 1) {\n x = (i + 0.5) * interval_width;\n pi += 1.0 / (x * x + 1.0);\n } #pragma omp parallel for private (x,i) reduction (+:pi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ")\n{\n int i;\n int tmp;\n int len = 100;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,i)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n tmp = a[i] + i;\n a[i] = tmp;\n } #pragma omp parallel for private (tmp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "d_set is a data race pair.\n*/\n#include \nint a[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = 
i;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "gma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "][j] = i;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ouble *xa1 = base;\n double *xa2 = xa1 + 12;\n int i;\n// initialize segments touched by indexSet\n \nfor (i = 521; i <= 2025; i += 1) {\n base[i] = 0.5 * i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB030-truedep1-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "har *argv[])\n{\n int i;\n int len = 100;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (len)", "context_chars": 100, "text": "s function parameters\n*/\n#include \n\nvoid foo1(double o1[],double c[],int len)\n{\n int i;\n \nfor (i = 0; i <= len - 1; i += 1) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for private (i) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "= 0.5 * c[i];\n o1[i] = volnew_o8;\n }\n}\ndouble o1[100];\ndouble c[100];\n\nint main()\n{\n int i;\n \nfor (i = 0; i <= 99; i += 1) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "t len = 100;\n double a[len][len];\n double b[len][len];\n double c[len][len];\n int i;\n int j;\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n a[i][j] = ((double )i) / 2.0;\n b[i][j] = ((double )i) / 3.0;\n c[i][j] = ((double )i) / 7.0;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "nt i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n a[i][j] = ((double )i) / 2.0;\n b[i][j] = ((double )i) / 3.0;\n c[i][j] = ((double )i) / 7.0;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (len)", "context_chars": 100, "text": "ble )i) / 2.0;\n b[i][j] = ((double )i) / 3.0;\n c[i][j] = ((double )i) / 7.0;\n }\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n c[i][j] = a[i][j] * b[i][j];\n }\n } #pragma omp parallel for private (i,j) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "pragma omp parallel for private (i,j) firstprivate (len)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n c[i][j] = a[i][j] * b[i][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "r option.\n*/\n#include \nint a[100][100];\nint b[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n b[i][j] = i;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n b[i][j] = i;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "r private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i;\n b[i][j] = i;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "][j] = i;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n } #pragma omp parallel for private (j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "rallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n b[i][j] = b[i][j] + 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "][j] + 1;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n b[i][j] = b[i][j] + 1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i,j,xx,yy)", "context_chars": 100, "text": "\n int yy;\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n/* Initialize initial condition and RHS */\n//#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)\n for (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)\n for (j = 0; j <= m - 1; j += 1) {\n/* -1 < x < 1 */\n xx = ((int )(- 1.0 + dx * (i - 1)));\n/* -1 < y < 1 */\n yy = ((int )(- 1.0 + dy * (j - 1)));\n u[i][j] = 0.0;\n f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));\n }\n } #pragma omp parallel for private(i,j,xx,yy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)", "context_chars": 100, "text": "m - 1);\n/* Initialize initial condition and RHS */\n//#pragma omp parallel for private(i,j,xx,yy)\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)\n for (j = 0; j <= m - 1; j += 1) {\n/* -1 < x < 1 */\n xx = ((int )(- 1.0 + dx * (i - 1)));\n/* -1 < y < 1 */\n yy = ((int )(- 1.0 + dy * (j - 1)));\n u[i][j] = 0.0;\n f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));\n }\n } #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)", "context_chars": 100, "text": "ma omp parallel for private (xx,yy,i,j) firstprivate (n,m)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n/* -1 < x < 1 */\n xx = ((int )(- 1.0 + dx * (i - 1)));\n/* -1 < y < 1 */\n yy = ((int )(- 1.0 + dy * (j - 1)));\n u[i][j] = 0.0;\n f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));\n } #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", 
"omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "r = 10.0 * tol;\n k = 1;\n while(k <= mits){\n error = 0.0;\n/* Copy new solution into old */\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n uold[i][j] = u[i][j];\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "nto old */\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n uold[i][j] = u[i][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (resid,i,j) reduction (+:error)", "context_chars": 100, "text": "rivate (j)\n for (j = 0; j <= m - 1; j += 1) {\n uold[i][j] = u[i][j];\n }\n }\n \nfor (i = 1; i <= n - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b)\n for (j = 1; j <= m - 1 - 1; j += 1) {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n }\n } #pragma omp parallel for private (resid,i,j) reduction (+:error)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b)", "context_chars": 100, "text": "rallel for private (resid,i,j) reduction (+:error)\n for (i = 1; i <= n - 1 - 1; i += 1) {\n \nfor (j = 1; j <= m - 1 - 1; j += 1) {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "zation directive\n*/\n#include \nint a[100];\nint b[100];\nint c[100];\n\nint main()\n{\n int i;\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = i * 40;\n b[i] = i - 1;\n c[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "vate (i)\n for (i = 0; i <= 99; i += 1) {\n a[i] = i * 40;\n b[i] = i - 1;\n c[i] = i;\n }\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = b[i] * c[i];\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "omp.h> \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 
100;\n int a[len];\n int b[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n b[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,i)", "context_chars": 100, "text": " a[i] = i;\n b[i] = i;\n }\n/* static storage for a local variable */\n{\n static int tmp;\n \nfor (i = 0; i <= len - 1; i += 1) {\n tmp = a[i] + i;\n a[i] = tmp;\n } #pragma omp parallel for private (tmp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,i) firstprivate (len)", "context_chars": 100, "text": "] + i;\n a[i] = tmp;\n }\n }\n/* automatic storage for a local variable */\n{\n int tmp;\n \nfor (i = 0; i <= len - 1; i += 1) {\n tmp = b[i] + i;\n b[i] = tmp;\n } #pragma omp parallel for private (tmp,i) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "uble a[1000][1000];\ndouble v[1000];\ndouble v_out[1000];\n\nint init()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= 999; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 999; j += 1) {\n a[i][j] = (i * j) + 0.01;\n }\n v_out[i] = (i * j) + 0.01;\n v[i] = (i * j) + 0.01;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "\n int j;\n int k;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 999; i += 1) {\n \nfor (j = 0; j <= 999; j += 1) {\n a[i][j] = (i * j) + 0.01;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "i] = (i * j) + 0.01;\n v[i] = (i * j) + 0.01;\n }\n return 0;\n}\n\nvoid mv()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 999; i += 1) {\n double sum = 0.0;\n \n#pragma omp parallel for private (j) reduction (+:sum)\n for (j = 0; j <= 999; j += 1) {\n sum += a[i][j] * v[j];\n }\n v_out[i] = sum;\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:sum)", "context_chars": 100, "text": "#pragma omp parallel for private (i,j)\n for (i = 0; i <= 999; i += 1) {\n double sum = 0.0;\n \nfor (j = 0; j <= 999; j += 1) {\n sum += a[i][j] * v[j];\n } #pragma omp parallel for private (j) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB029-truedep1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": ".h>\n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int len = 100;\n int a[100];\n \nfor 
(i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "in()\n{\n int len = 100;\n double a[len];\n double b[len];\n double c[len];\n int i;\n int j = 0;\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = ((double )i) / 2.0;\n b[i] = ((double )i) / 3.0;\n c[i] = ((double )i) / 7.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "e \n\nint main(int argc,char *argv[])\n{\n int i;\n int tmp;\n int len = 100;\n int a[100];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,i) firstprivate (len)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n tmp = a[i] + i;\n a[i] = tmp;\n } #pragma omp parallel for private (tmp,i) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (n,g)", "context_chars": 100, "text": "/\n/*\nExample use of firstprivate()\n*/\n#include \n\nvoid foo(int *a,int n,int g)\n{\n int i;\n \nfor (i = 0; i <= n - 1; i += 1) {\n a[i] = a[i] + g;\n } #pragma omp parallel for private (i) firstprivate (n,g)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " for (i = 0; i <= n - 1; i += 1) {\n a[i] = a[i] + g;\n }\n}\nint a[100];\n\nint main()\n{\n int i;\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n,m)", "context_chars": 100, "text": " \nint n = 100;\nint m = 100;\ndouble b[100][100];\n\nint init()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i * j);\n }\n } #pragma omp parallel for private (i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j) firstprivate (n,m)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i * j);\n } #pragma omp parallel for private (j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n,m)", "context_chars": 100, "text": " - 1; j += 1) {\n b[i][j] = (i * j);\n }\n }\n return 0;\n}\n\nvoid foo()\n{\n int i;\n int j;\n \nfor (i = 0; i <= n - 1; i += 1) {\n// Be careful about bounds of j\n for (j = 0; j <= m - 1 - 1; j += 1) {\n b[i][j] = b[i][j + 1];\n }\n } #pragma omp parallel for private (i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB047-doallchar-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ts may wrongfuly report race condition.\n*/\n#include \nchar a[100];\n\nint main()\n{\n int i;\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = (i % 120);\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB047-doallchar-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i)\n for (i = 0; i <= 99; i += 1) {\n a[i] = (i % 120);\n }\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = (a[i] + 1);\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "nclude \n\nint main()\n{\n int i;\n int error;\n int len = 1000;\n int a[len];\n int b = 5;\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (len,b)", "context_chars": 100, "text": "#pragma omp parallel for private (i)\n for (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n }\n{\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = b + a[i] * 5;\n } #pragma omp parallel for private (i) firstprivate (len,b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "omp.h> \n\nint main(int argc,char *argv[])\n{\n int i;\n int j;\n int len = 20;\n double a[20][20];\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n a[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "ble a[20][20];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n a[i][j] = 0.5;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "<= len 
- 1; j += 1) {\n a[i][j] = 0.5;\n }\n }\n for (i = 0; i <= len - 1 - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ouble *xa1 = base;\n double *xa2 = xa1 + 12;\n int i;\n// initialize segments touched by indexSet\n \nfor (i = 521; i <= 2025; i += 1) {\n base[i] = 0.5 * i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "le a[100][100];\ndouble b[100][100];\ndouble c[100][100];\n\nint init()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = ((double )i) * j;\n b[i][j] = ((double )i) * j;\n c[i][j] = ((double )i) * j;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": ";\n int j;\n int k;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = ((double )i) * j;\n b[i][j] = ((double )i) * j;\n c[i][j] = ((double )i) * j;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " c[i][j] = ((double )i) * j;\n }\n }\n return 0;\n}\n\nint mmm()\n{\n int i;\n int j;\n int k;\n \nfor (i = 0; i <= 99; i += 1) {\n for (k = 0; k <= 99; k += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n c[i][j] = c[i][j] + a[i][k] * b[k][j];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "llel for private (i,j,k)\n for (i = 0; i <= 99; i += 1) {\n for (k = 0; k <= 99; k += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n c[i][j] = c[i][j] + a[i][k] * b[k][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB085-threadprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len)", "context_chars": 100, "text": "; i += 1) {\n foo(i);\n }\n{\n sum = sum + sum0;\n }\n }\n/* reference calculation */\n \nfor (i = 0; i <= len - 1; i += 1) {\n sum1 = sum1 + i;\n } #pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, 
"text": "ouble *xa1 = base;\n double *xa2 = xa1 + 12;\n int i;\n// initialize segments touched by indexSet\n \nfor (i = 521; i <= 2025; i += 1) {\n base[i] = 0.5 * i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (len)", "context_chars": 100, "text": "t i;\n int len = 100;\n int numNodes = len;\n int numNodes2 = 0;\n int x[100];\n// initialize x[]\n \nfor (i = 0; i <= len - 1; i += 1) {\n if (i % 2 == 0) \n x[i] = 5;\n else \n x[i] = - 5;\n } #pragma omp parallel for private (i) firstprivate (len)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (-:numNodes2)", "context_chars": 100, "text": "0; i <= len - 1; i += 1) {\n if (i % 2 == 0) \n x[i] = 5;\n else \n x[i] = - 5;\n }\n \nfor (i = numNodes - 1; i >= 0; i += -1) {\n if (x[i] <= 0) {\n numNodes2--;\n }\n } #pragma omp parallel for private (i) reduction (-:numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "ill be shared by default. \n*/\n#include \nint a[100][100];\n\nint main()\n{\n int i;\n int j;\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "{\n int i;\n int j;\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = i + j;\n }\n }\n \nfor (i = 0; i <= 99; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " = i + j;\n }\n }\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= 99; i += 1) {\n \nfor (j = 0; j <= 99; j += 1) {\n a[i][j] = a[i][j] + 1;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB025-simdtruedep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n int i;\n int len = 100;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n int b[len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n b[i] = i + 1;\n } #pragma omp 
parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB033-truedeplinear-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n#include \n#include \n\nint main(int argc,char *argv[])\n{\n int i;\n int a[2000];\n \nfor (i = 0; i <= 1999; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB045-doall1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "Simplest one dimension array computation\n*/\n#include \nint a[100];\n\nint main()\n{\n int i;\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB045-doall1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "nt i;\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= 99; i += 1) {\n a[i] = i;\n }\n \nfor (i = 0; i <= 99; i += 1) {\n a[i] = a[i] + 1;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "End execution. \\n\");\n return 1;\n }\n double *xa1 = base;\n double *xa2 = base + 12;\n int i;\n \nfor (i = 521; i <= 2025; i += 1) {\n base[i] = 0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB040-truedepsingleelement-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "{\n int len = 1000;\n int i;\n if (argc > 1) \n len = atoi(argv[1]);\n int a[len];\n a[0] = 2;\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "t argc,char *argv[])\n{\n int i;\n int j;\n int n = 1000;\n int m = 1000;\n double b[1000][1000];\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "e b[1000][1000];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " = 0; j <= m - 1; j += 1) {\n b[i][j] = 0.5;\n }\n }\n for (i = 1; i <= n - 1; i += 1) {\n \nfor (j = 1; 
j <= m - 1; j += 1) {\n b[i][j] = b[i - 1][j - 1];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)", "context_chars": 100, "text": " int yy;\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n/* Initialize initial condition and RHS */\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)\n for (j = 0; j <= m - 1; j += 1) {\n/* -1 < x < 1 */\n xx = ((int )(- 1.0 + dx * (i - 1)));\n/* -1 < y < 1 */\n yy = ((int )(- 1.0 + dy * (j - 1)));\n u[i][j] = 0.0;\n f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));\n }\n } #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)", "context_chars": 100, "text": "ma omp parallel for private (xx,yy,i,j) firstprivate (n,m)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n/* -1 < x < 1 */\n xx = ((int )(- 1.0 + dx * (i - 1)));\n/* -1 < y < 1 */\n yy = ((int )(- 1.0 + dy * (j - 1)));\n u[i][j] = 0.0;\n f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));\n } #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " int i;\n int j;\n int len = 20;\n if (argc > 1) \n len = atoi(argv[1]);\n double a[len][len];\n \nfor (i = 0; i <= len - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= len - 1; j += 1) {\n a[i][j] = 0.5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "e a[len][len];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= len - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n a[i][j] = 0.5;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "<= len - 1; j += 1) {\n a[i][j] = 0.5;\n }\n }\n for (i = 0; i <= len - 1 - 1; i += 1) {\n \nfor (j = 0; j <= len - 1; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB104-nowait-barrier-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "nclude \n\nint main()\n{\n int i;\n int error;\n int len = 1000;\n int a[len];\n int b = 5;\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n } #pragma omp parallel for private (i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB104-nowait-barrier-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (len,b)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= len - 1; i += 1) {\n a[i] = i;\n }\n \nfor (i = 0; i <= len - 1; i += 1) {\n a[i] = b + a[i] * 5;\n } #pragma omp parallel for private (i) firstprivate (len,b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " 1000;\n if (argc > 1) \n len = atoi(argv[1]);\n int n = len;\n int m = len;\n double b[n][m];\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " double b[n][m];\n \n#pragma omp parallel for private (i,j)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "arallel for private (j)\n for (j = 0; j <= m - 1; j += 1) {\n b[i][j] = (i + j);\n }\n }\n \nfor (i = 0; i <= n - 1; i += 1) {\n for (j = 1; j <= m - 1; j += 1) {\n b[i][j] = b[i][j - 1];\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/utilities/polybench.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:tmp) firstprivate (cs)", "context_chars": 100, "text": "ouble ));\n double *flush = (double *)(calloc(cs,sizeof(double )));\n int i;\n double tmp = 0.0;\n \nfor (i = 0; i <= cs - 1; i += 1) {\n tmp += flush[i];\n } #pragma omp parallel for private (i) reduction (+:tmp) firstprivate (cs)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n)", "context_chars": 100, "text": "on. 
*/\n#include \n\nstatic void init_array(int n,double C[1024][1024])\n{\n int i;\n int j;\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= n - 1; j += 1) {\n C[i][j] = 42;\n }\n } #pragma omp parallel for private (i,j) firstprivate (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j) firstprivate (n)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= n - 1; j += 1) {\n C[i][j] = 42;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (n)", "context_chars": 100, "text": "call and return. */\n\nstatic void kernel_template(int n,double C[1024][1024])\n{\n int i;\n int j;\n \nfor (i = 0; i <= n - 1; i += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 0; j <= n - 1; j += 1) {\n C[i][j] += 42;\n }\n } #pragma omp parallel for private (i,j) firstprivate (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j) firstprivate (n)\n for (i = 0; i <= n - 1; i += 1) {\n \nfor (j = 0; j <= n - 1; j += 1) {\n C[i][j] += 42;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "to INT_MIN\n k -> keys[0] = - 0x7fffffff - 1;\n k -> keys[k -> num_keys - 1] = 0x7fffffff;\n \nfor (i = k -> num_keys; i <= order - 1; i += 1) {\n k -> keys[i] = 0x7fffffff;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " -> pointers[order - 1] = leaf -> pointers[order - 1];\n leaf -> pointers[order - 1] = new_leaf;\n \nfor (i = leaf -> num_keys; i <= order - 1 - 1; i += 1) {\n leaf -> pointers[i] = ((void *)0);\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (order)", "context_chars": 100, "text": " (i = leaf -> num_keys; i <= order - 1 - 1; i += 1) {\n leaf -> pointers[i] = ((void *)0);\n }\n \nfor (i = new_leaf -> num_keys; i <= order - 1 - 1; i += 1) {\n new_leaf -> pointers[i] = ((void *)0);\n } #pragma omp parallel for private (i) firstprivate (order)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ointer in the parent pointing\n* to n. 
\n* If n is the leftmost child, this means\n* return -1.\n*/\n \nfor (i = 0; i <= n -> parent -> num_keys; i += 1) {\n if (n -> parent -> pointers[i] == n) \n return i - 1;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (order)", "context_chars": 100, "text": "for tidiness.\n// A leaf uses the last pointer to point to the next leaf.\n if (n -> is_leaf) {\n \nfor (i = n -> num_keys; i <= order - 1 - 1; i += 1) {\n n -> pointers[i] = ((void *)0);\n } #pragma omp parallel for private (i) firstprivate (order)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (order)", "context_chars": 100, "text": "_keys; i <= order - 1 - 1; i += 1) {\n n -> pointers[i] = ((void *)0);\n }\n }\n else {\n \nfor (i = n -> num_keys + 1; i <= order - 1; i += 1) {\n n -> pointers[i] = ((void *)0);\n } #pragma omp parallel for private (i) firstprivate (order)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/b+tree/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " exit(- 1);\n }\n _tmp;\n }));\n// OUTPUT: ans CPU initialization\n \nfor (i = 0; i <= count - 1; i += 1) {\n ans[i] . value = - 1;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/bfs/bfs.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ate mem for the result on host side\n int *h_cost = (int *)(malloc(sizeof(int ) * no_of_nodes));\n \nfor (int i = 0; i <= no_of_nodes - 1; i += 1) {\n h_cost[i] = - 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/hotspot3D/3D.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:err)", "context_chars": 100, "text": " (i < numiter);\n}\n\nfloat accuracy(float *arr1,float *arr2,int len)\n{\n float err = 0.0;\n int i;\n \nfor (i = 0; i <= len - 1; i += 1) {\n err += (arr1[i] - arr2[i]) * (arr1[i] - arr2[i]);\n } #pragma omp parallel for private (i) reduction (+:err)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "================================================================================================\n \nfor (i = 0; i <= public . endoPoints - 1; i += 1) {\n private[i] . point_no = i;\n private[i] . in_pointer = private[i] . point_no * public . in_mod_elem;\n// original row coordinates\n private[i] . d_Row = public . d_endoRow;\n// original col coordinates\n private[i] . d_Col = public . d_endoCol;\n// updated row coordinates\n private[i] . d_tRowLoc = public . d_tEndoRowLoc;\n// updated row coordinates\n private[i] . d_tColLoc = public . d_tEndoColLoc;\n// templates\n private[i] . d_T = public . 
d_endoT;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " . d_tColLoc = public . d_tEndoColLoc;\n// templates\n private[i] . d_T = public . d_endoT;\n }\n \nfor (i = public . endoPoints; i <= public . allPoints - 1; i += 1) {\n private[i] . point_no = i - public . endoPoints;\n private[i] . in_pointer = private[i] . point_no * public . in_mod_elem;\n private[i] . d_Row = public . d_epiRow;\n private[i] . d_Col = public . d_epiCol;\n private[i] . d_tRowLoc = public . d_tEpiRowLoc;\n private[i] . d_tColLoc = public . d_tEpiColLoc;\n private[i] . d_T = public . d_epiT;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:in_final_sum)", "context_chars": 100, "text": " OF INPUT 1 SQUARED\n//==================================================\n in_final_sum = 0;\n \nfor (i = 0; i <= public . in_mod_elem - 1; i += 1) {\n in_final_sum = in_final_sum + d_in[i];\n } #pragma omp parallel for private (i) reduction (+:in_final_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:in_sqr_final_sum)", "context_chars": 100, "text": "em - 1; i += 1) {\n in_final_sum = in_final_sum + d_in[i];\n }\n in_sqr_final_sum = 0;\n \nfor (i = 0; i <= public . in_mod_elem - 1; i += 1) {\n in_sqr_final_sum = in_sqr_final_sum + private . d_in_sqr[i];\n } #pragma omp parallel for private (i) reduction (+:in_sqr_final_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (jb,ib,ja,ia) reduction (+:s) firstprivate (jp1,ia1,ia2)", "context_chars": 100, "text": "ows;\n }\n else {\n ia2 = i;\n }\n s = 0;\n// getting data\n \nfor (ja = ja1; ja <= ja2; ja += 1) {\n jb = jp1 - ja;\n \n#pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1,jb)\n for (ia = ia1; ia <= ia2; ia += 1) {\n ib = ip1 - ia;\n s = s + private . d_in_mod[public . in_mod_rows * (ja - 1) + ia - 1] * private . d_in2[public . in2_rows * (jb - 1) + ib - 1];\n }\n } #pragma omp parallel for private (jb,ib,ja,ia) reduction (+:s) firstprivate (jp1,ia1,ia2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1,jb)", "context_chars": 100, "text": "vate (jp1,ia1,ia2)\n for (ja = ja1; ja <= ja2; ja += 1) {\n jb = jp1 - ja;\n \nfor (ia = ia1; ia <= ia2; ia += 1) {\n ib = ip1 - ia;\n s = s + private . d_in_mod[public . in_mod_rows * (ja - 1) + ia - 1] * private . d_in2[public . 
in2_rows * (jb - 1) + ib - 1];\n } #pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1,jb)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (sum,pos_ori,ei_new,position)", "context_chars": 100, "text": "CAL CUMULATIVE SUM\t\t\t\t\t\tSAVE IN d_in2_pad\n//==================================================\n \nfor (ei_new = 0; ei_new <= public . in2_pad_cols - 1; ei_new += 1) {\n// figure out column position\n pos_ori = ei_new * public . in2_pad_rows;\n// loop through all rows\n sum = 0;\n \n#pragma omp parallel for private (position) firstprivate (pos_ori)\n for (position = pos_ori; position <= pos_ori + public . in2_pad_rows - 1; position = position + 1) {\n private . d_in2_pad[position] = private . d_in2_pad[position] + sum;\n sum = private . d_in2_pad[position];\n }\n } #pragma omp parallel for private (sum,pos_ori,ei_new,position)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (position) firstprivate (pos_ori)", "context_chars": 100, "text": "tion\n pos_ori = ei_new * public . in2_pad_rows;\n// loop through all rows\n sum = 0;\n \nfor (position = pos_ori; position <= pos_ori + public . in2_pad_rows - 1; position = position + 1) {\n private . d_in2_pad[position] = private . d_in2_pad[position] + sum;\n sum = private . d_in2_pad[position];\n } #pragma omp parallel for private (position) firstprivate (pos_ori)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (sum,pos_ori,ei_new,position)", "context_chars": 100, "text": "TAL CUMULATIVE SUM\t\t\t\t\t\tSAVE IN d_in2_sub\n//==================================================\n \nfor (ei_new = 0; ei_new <= public . in2_sub_rows - 1; ei_new += 1) {\n// figure out row position\n pos_ori = ei_new;\n// loop through all rows\n sum = 0;\n \n#pragma omp parallel for private (position) firstprivate (pos_ori)\n for (position = pos_ori; position <= pos_ori + public . in2_sub_elem - 1; position = position + public . in2_sub_rows) {\n private . d_in2_sub[position] = private . d_in2_sub[position] + sum;\n sum = private . d_in2_sub[position];\n }\n } #pragma omp parallel for private (sum,pos_ori,ei_new,position)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (position) firstprivate (pos_ori)", "context_chars": 100, "text": "{\n// figure out row position\n pos_ori = ei_new;\n// loop through all rows\n sum = 0;\n \nfor (position = pos_ori; position <= pos_ori + public . in2_sub_elem - 1; position = position + public . in2_sub_rows) {\n private . d_in2_sub[position] = private . d_in2_sub[position] + sum;\n sum = private . 
d_in2_sub[position];\n } #pragma omp parallel for private (position) firstprivate (pos_ori)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (sum,pos_ori,ei_new,position)", "context_chars": 100, "text": "ULATIVE SUM\t\t\t\t\t\tSAVE IN d_in2_pad\n//==================================================\n//work\n \nfor (ei_new = 0; ei_new <= public . in2_pad_cols - 1; ei_new += 1) {\n// figure out column position\n pos_ori = ei_new * public . in2_pad_rows;\n// loop through all rows\n sum = 0;\n \n#pragma omp parallel for private (position) firstprivate (pos_ori)\n for (position = pos_ori; position <= pos_ori + public . in2_pad_rows - 1; position = position + 1) {\n private . d_in2_pad[position] = private . d_in2_pad[position] + sum;\n sum = private . d_in2_pad[position];\n }\n } #pragma omp parallel for private (sum,pos_ori,ei_new,position)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (position) firstprivate (pos_ori)", "context_chars": 100, "text": "tion\n pos_ori = ei_new * public . in2_pad_rows;\n// loop through all rows\n sum = 0;\n \nfor (position = pos_ori; position <= pos_ori + public . in2_pad_rows - 1; position = position + 1) {\n private . d_in2_pad[position] = private . d_in2_pad[position] + sum;\n sum = private . d_in2_pad[position];\n } #pragma omp parallel for private (position) firstprivate (pos_ori)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (sum,pos_ori,ei_new,position)", "context_chars": 100, "text": "TAL CUMULATIVE SUM\t\t\t\t\t\tSAVE IN d_in2_sub\n//==================================================\n \nfor (ei_new = 0; ei_new <= public . in2_sub_rows - 1; ei_new += 1) {\n// figure out row position\n pos_ori = ei_new;\n// loop through all rows\n sum = 0;\n \n#pragma omp parallel for private (position) firstprivate (pos_ori)\n for (position = pos_ori; position <= pos_ori + public . in2_sub_elem - 1; position = position + public . in2_sub_rows) {\n private . d_in2_sub[position] = private . d_in2_sub[position] + sum;\n sum = private . d_in2_sub[position];\n }\n } #pragma omp parallel for private (sum,pos_ori,ei_new,position)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (position) firstprivate (pos_ori)", "context_chars": 100, "text": "{\n// figure out row position\n pos_ori = ei_new;\n// loop through all rows\n sum = 0;\n \nfor (position = pos_ori; position <= pos_ori + public . in2_sub_elem - 1; position = position + public . in2_sub_rows) {\n private . d_in2_sub[position] = private . d_in2_sub[position] + sum;\n sum = private . d_in2_sub[position];\n } #pragma omp parallel for private (position) firstprivate (pos_ori)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (ei_new)", "context_chars": 100, "text": "sk_col = cent + private . d_tColLoc[pointer] - private . d_Col[private . 
point_no] - 1;\n//work\n \nfor (ei_new = 0; ei_new <= public . tMask_elem - 1; ei_new += 1) {\n private . d_tMask[ei_new] = 0;\n } #pragma omp parallel for private (ei_new)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (jb,ib,ja,ia) reduction (+:s) firstprivate (jp1,ia1,ia2)", "context_chars": 100, "text": "sk_rows;\n }\n else {\n ia2 = i;\n }\n s = 0;\n// get data\n \nfor (ja = ja1; ja <= ja2; ja += 1) {\n jb = jp1 - ja;\n \n#pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1)\n for (ia = ia1; ia <= ia2; ia += 1) {\n ib = ip1 - ia;\n s = s + private . d_tMask[public . tMask_rows * (ja - 1) + ia - 1] * 1;\n }\n } #pragma omp parallel for private (jb,ib,ja,ia) reduction (+:s) firstprivate (jp1,ia1,ia2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1)", "context_chars": 100, "text": "vate (jp1,ia1,ia2)\n for (ja = ja1; ja <= ja2; ja += 1) {\n jb = jp1 - ja;\n \nfor (ia = ia1; ia <= ia2; ia += 1) {\n ib = ip1 - ia;\n s = s + private . d_tMask[public . tMask_rows * (ja - 1) + ia - 1] * 1;\n } #pragma omp parallel for private (ib,ia) reduction (+:s) firstprivate (ip1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "----------------------------------------------------------*/\n int i;\n int j;\n int k;\n int m;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "agma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", 
"omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "ma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": ";\n int m;\n int d;\n double xi;\n double eta;\n double zeta;\n double u_exact[5];\n double add;\n \nfor (m = 0; m <= 4; m += 1) {\n rms[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (add,m)", "context_chars": 100, "text": " += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,u_exact);\n \nfor (m = 0; m <= 4; m += 1) {\n add = u[i][j][k][m] - u_exact[m];\n rms[m] = rms[m] + add * add;\n } #pragma omp parallel for private (add,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "-----------------------------------*/\n int i;\n int j;\n int k;\n int d;\n int m;\n double add;\n \nfor (m = 0; m <= 4; m += 1) {\n rms[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (add,m)", "context_chars": 100, "text": "grid_points[1] - 1 - 1; j += 1) {\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n add = rhs[i][j][k][m];\n rms[m] = rms[m] + add * add;\n } #pragma omp parallel for private (add,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,i,j,k)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (m,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (m,k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (m,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,j,k)", "context_chars": 100, "text": "pragma omp parallel for private (m,i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (m,k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = 0.0;\n }\n }\n } #pragma omp parallel for private (m,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,k)", 
"context_chars": 100, "text": "agma omp parallel for private (m,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = 0.0;\n }\n } #pragma omp parallel for private (m,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "ma omp parallel for private (m,k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)", "context_chars": 100, "text": "nces \nc-------------------------------------------------------------------*/\n//for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n eta = ((double )j) * dnym1;\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n xi = ((double )i) * dnxm1;\n exact_solution(xi,eta,zeta,dtemp);\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n ue[i][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \n#pragma omp parallel for private (m) firstprivate (dtpp)\n for (m = 1; m <= 4; m += 1) {\n buf[i][m] = dtpp * dtemp[m];\n }\n cuf[i] = buf[i][1] * buf[i][1];\n buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];\n q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);\n }\n \n#pragma omp parallel for private (ip1,im1,i) firstprivate (tx2,xxcon1,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n im1 = i - 1;\n ip1 = i + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - tx2 * (ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1]) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n \n#pragma omp parallel for private (i,m)\n for (m = 0; m <= 4; m += 1) {\n 
i = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n i = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n }\n \n#pragma omp parallel for private (m,i)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 1 * 3; i <= grid_points[0] - 3 - 1; i += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n }\n }\n \n#pragma omp parallel for private (i,m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n i = grid_points[0] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);\n i = grid_points[0] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);\n }\n }\n } #pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "= 1) {\n xi = ((double )i) * dnxm1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[i][m] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dtpp)", "context_chars": 100, "text": " m += 1) {\n ue[i][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[i][m] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (ip1,im1,i) firstprivate (tx2,xxcon1,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2)", "context_chars": 100, "text": "i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);\n }\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n im1 = i - 1;\n ip1 = i + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - tx2 * (ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1]) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * 
(buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);\n } #pragma omp parallel for private (ip1,im1,i) firstprivate (tx2,xxcon1,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n i = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n i = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n } #pragma omp parallel for private (i,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,i)", "context_chars": 100, "text": "p * (- 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 1 * 3; i <= grid_points[0] - 3 - 1; i += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n }\n } #pragma omp parallel for private (m,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " }\n \n#pragma omp parallel for private (m,i)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1 * 3; i <= grid_points[0] - 3 - 1; i += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,m) firstprivate (dssp)", "context_chars": 100, "text": " ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n i = grid_points[0] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);\n i = grid_points[0] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);\n } #pragma omp parallel for private (i,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, zeta, eta) ", "context_chars": 100, "text": "x differences \nc-------------------------------------------------------------------*/\n//for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n xi = ((double )i) * dnxm1;\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,dtemp);\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n ue[j][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \n#pragma omp parallel for private (m) firstprivate 
(dtpp)\n for (m = 1; m <= 4; m += 1) {\n buf[j][m] = dtpp * dtemp[m];\n }\n cuf[j] = buf[j][2] * buf[j][2];\n buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];\n q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);\n }\n \n#pragma omp parallel for private (jp1,jm1,j) firstprivate (ty2,yycon1,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n jm1 = j - 1;\n jp1 = j + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - ty2 * (ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1]) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n \n#pragma omp parallel for private (j,m)\n for (m = 0; m <= 4; m += 1) {\n j = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n j = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n }\n \n#pragma omp parallel for private (m,j)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 1 * 3; j <= grid_points[1] - 3 - 1; j += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n }\n }\n \n#pragma omp parallel for private (j,m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n j = grid_points[1] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);\n j = grid_points[1] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);\n }\n }\n } #pragma omp parallel for private(xi, zeta, eta) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[j][m] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp 
parallel for private (m) firstprivate (dtpp)", "context_chars": 100, "text": " m += 1) {\n ue[j][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[j][m] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (jp1,jm1,j) firstprivate (ty2,yycon1,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2)", "context_chars": 100, "text": "j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);\n }\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n jm1 = j - 1;\n jp1 = j + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - ty2 * (ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1]) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);\n } #pragma omp parallel for private (jp1,jm1,j) firstprivate (ty2,yycon1,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n j = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n j = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,j)", "context_chars": 100, "text": "p * (- 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 1 * 3; j <= grid_points[1] - 3 - 1; j += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n }\n } #pragma omp parallel for private (m,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": 
"#pragma omp parallel for private (j)", "context_chars": 100, "text": " }\n \n#pragma omp parallel for private (m,j)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 1 * 3; j <= grid_points[1] - 3 - 1; j += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m) firstprivate (dssp)", "context_chars": 100, "text": " ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n j = grid_points[1] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);\n j = grid_points[1] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);\n } #pragma omp parallel for private (j,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(xi, eta, zeta)", "context_chars": 100, "text": "nces \nc-------------------------------------------------------------------*/\n//for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n xi = ((double )i) * dnxm1;\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n eta = ((double )j) * dnym1;\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,dtemp);\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n ue[k][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \n#pragma omp parallel for private (m) firstprivate (dtpp)\n for (m = 1; m <= 4; m += 1) {\n buf[k][m] = dtpp * dtemp[m];\n }\n cuf[k] = buf[k][3] * buf[k][3];\n buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];\n q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);\n }\n \n#pragma omp parallel for private (km1,kp1,k) firstprivate (tz2,zzcon1,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n km1 = k - 1;\n kp1 = k + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - tz2 * (ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1]) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * 
(ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n \n#pragma omp parallel for private (k,m)\n for (m = 0; m <= 4; m += 1) {\n k = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n k = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n }\n \n#pragma omp parallel for private (m,k)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1 * 3; k <= grid_points[2] - 3 - 1; k += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n }\n }\n \n#pragma omp parallel for private (k,m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n k = grid_points[2] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);\n k = grid_points[2] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);\n }\n }\n } #pragma omp parallel for private(xi, eta, zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[k][m] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dtpp)", "context_chars": 100, "text": " m += 1) {\n ue[k][m] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[k][m] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (km1,kp1,k) firstprivate (tz2,zzcon1,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2)", "context_chars": 100, "text": "k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);\n }\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n km1 = k - 1;\n kp1 = k + 1;\n forcing[i][j][k][0] = forcing[i][j][k][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);\n forcing[i][j][k][1] = forcing[i][j][k][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);\n forcing[i][j][k][2] = forcing[i][j][k][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);\n forcing[i][j][k][3] = forcing[i][j][k][3] - tz2 * (ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1]) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);\n forcing[i][j][k][4] = forcing[i][j][k][4] - tz2 * 
(buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);\n } #pragma omp parallel for private (km1,kp1,k) firstprivate (tz2,zzcon1,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n k = 1;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n k = 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (- 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,k)", "context_chars": 100, "text": "p * (- 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1 * 3; k <= grid_points[2] - 3 - 1; k += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n }\n } #pragma omp parallel for private (m,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": " }\n \n#pragma omp parallel for private (m,k)\n for (m = 0; m <= 4; m += 1) {\n \nfor (k = 1 * 3; k <= grid_points[2] - 3 - 1; k += 1) {\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m) firstprivate (dssp)", "context_chars": 100, "text": " ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n k = grid_points[2] - 3;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);\n k = grid_points[2] - 2;\n forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);\n } #pragma omp parallel for private (k,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,i,j,k)", "context_chars": 100, "text": " the forcing function, \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (m,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m,k)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 
1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = - 1.0 * forcing[i][j][k][m];\n }\n }\n }\n } #pragma omp parallel for private (m,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,j,k)", "context_chars": 100, "text": "ma omp parallel for private (m,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m,k)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = - 1.0 * forcing[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (m,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,k)", "context_chars": 100, "text": " omp parallel for private (m,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = - 1.0 * forcing[i][j][k][m];\n }\n } #pragma omp parallel for private (m,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "mp parallel for private (m,k)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n forcing[i][j][k][m] = - 1.0 * forcing[i][j][k][m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " xi, eta, zeta \nc-------------------------------------------------------------------*/\n int m;\n \nfor (m = 0; m <= 4; m += 1) {\n dtemp[m] = ce[m][0] + xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) + eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) + zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12])));\n } #pragma omp parallel for private (m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " the whole thing here. 
\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= 63; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 63; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = 1.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "-------*/\n \n#pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= 63; i += 1) {\n \nfor (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 63; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = 1.0;\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": " 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= 63; j += 1) {\n \nfor (k = 0; k <= 63; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = 1.0;\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "{\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 63; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = 1.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (Pxi,Peta,Pzeta,m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " iz += 1) {\n exact_solution(xi,eta,(double )iz,&Pface[iz][2][0]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];\n Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];\n Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];\n u[i][j][k][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;\n } #pragma omp parallel for private (Pxi,Peta,Pzeta,m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (i)", "context_chars": 100, "text": "; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (i)", "context_chars": 100, "text": "; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (j)", "context_chars": 100, "text": "; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (j)", "context_chars": 100, "text": "; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k)", "context_chars": 100, "text": "1; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k)", "context_chars": 100, "text": "1; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m,n)", "context_chars": 100, "text": " hand side for starters\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m,n)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m,n)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n)\n for (n = 0; n <= 4; n += 1) {\n lhs[i][j][k][0][m][n] = 0.0;\n lhs[i][j][k][1][m][n] = 0.0;\n lhs[i][j][k][2][m][n] = 0.0;\n }\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m,n)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k,m,n)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m,n)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n)\n for (n = 0; n <= 4; n += 1) {\n lhs[i][j][k][0][m][n] = 0.0;\n lhs[i][j][k][1][m][n] = 0.0;\n lhs[i][j][k][2][m][n] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp 
parallel for private (k,m,n)", "context_chars": 100, "text": "ma omp parallel for private (j,k,m,n)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n)\n for (n = 0; n <= 4; n += 1) {\n lhs[i][j][k][0][m][n] = 0.0;\n lhs[i][j][k][1][m][n] = 0.0;\n lhs[i][j][k][2][m][n] = 0.0;\n }\n }\n } #pragma omp parallel for private (k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,n)", "context_chars": 100, "text": " omp parallel for private (k,m,n)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n)\n for (n = 0; n <= 4; n += 1) {\n lhs[i][j][k][0][m][n] = 0.0;\n lhs[i][j][k][1][m][n] = 0.0;\n lhs[i][j][k][2][m][n] = 0.0;\n }\n } #pragma omp parallel for private (m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (n)", "context_chars": 100, "text": " \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n \nfor (n = 0; n <= 4; n += 1) {\n lhs[i][j][k][0][m][n] = 0.0;\n lhs[i][j][k][1][m][n] = 0.0;\n lhs[i][j][k][2][m][n] = 0.0;\n } #pragma omp parallel for private (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "verkill, but convenient\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n lhs[i][j][k][1][m][m] = 1.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n lhs[i][j][k][1][m][m] = 1.0;\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "agma omp parallel for private (j,k,m)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n lhs[i][j][k][1][m][m] = 1.0;\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp 
parallel for private (m)", "context_chars": 100, "text": "ma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n lhs[i][j][k][1][m][m] = 1.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)", "context_chars": 100, "text": "abeled f) and n jacobians\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,i,k)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,i) firstprivate (c3c4,c1345,c1,c2,con43)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n/*--------------------------------------------------------------------\nc \nc-------------------------------------------------------------------*/\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 1.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = (2.0 - c2) * (u[i][j][k][1] / u[i][j][k][0]);\n fjac[i][j][k][1][2] = -c2 * (u[i][j][k][2] * tmp1);\n fjac[i][j][k][1][3] = -c2 * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][1][4] = c2;\n fjac[i][j][k][2][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][3] = 0.0;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][2] = 0.0;\n fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][1] * tmp1);\n fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * (3.0 * u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][3] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][4] = c1 * (u[i][j][k][1] * tmp1);\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -con43 * c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = con43 * c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 
* u[i][j][k][4];\n njac[i][j][k][4][1] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n/*--------------------------------------------------------------------\nc now jacobians set, so form left hand side in x direction\nc-------------------------------------------------------------------*/\n \n#pragma omp parallel for private (tmp1,tmp2,i) firstprivate (tx1,tx2,dx1,dx2,dx3,dx4,dx5,dt)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n tmp1 = dt * tx1;\n tmp2 = dt * tx2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i - 1][j][k][0][0] - tmp1 * njac[i - 1][j][k][0][0] - tmp1 * dx1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i - 1][j][k][0][1] - tmp1 * njac[i - 1][j][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i - 1][j][k][0][2] - tmp1 * njac[i - 1][j][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i - 1][j][k][0][3] - tmp1 * njac[i - 1][j][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i - 1][j][k][0][4] - tmp1 * njac[i - 1][j][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i - 1][j][k][1][0] - tmp1 * njac[i - 1][j][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i - 1][j][k][1][1] - tmp1 * njac[i - 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i - 1][j][k][1][2] - tmp1 * njac[i - 1][j][k][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i - 1][j][k][1][3] - tmp1 * njac[i - 1][j][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i - 1][j][k][1][4] - tmp1 * njac[i - 1][j][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i - 1][j][k][2][0] - tmp1 * njac[i - 1][j][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i - 1][j][k][2][1] - tmp1 * njac[i - 1][j][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i - 1][j][k][2][2] - tmp1 * njac[i - 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i - 1][j][k][2][3] - tmp1 * njac[i - 1][j][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i - 1][j][k][2][4] - tmp1 * njac[i - 1][j][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i - 1][j][k][3][0] - tmp1 * njac[i - 1][j][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i - 1][j][k][3][1] - tmp1 * njac[i - 1][j][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i - 1][j][k][3][2] - tmp1 * njac[i - 1][j][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i - 1][j][k][3][3] - tmp1 * njac[i - 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i - 1][j][k][3][4] - tmp1 * njac[i - 1][j][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i - 1][j][k][4][0] - tmp1 * njac[i - 1][j][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i - 1][j][k][4][1] - tmp1 * njac[i - 1][j][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i - 1][j][k][4][2] - tmp1 * njac[i - 1][j][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i - 1][j][k][4][3] - tmp1 * njac[i - 1][j][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i - 1][j][k][4][4] - tmp1 * njac[i - 1][j][k][4][4] - tmp1 * dx5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n 
lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i + 1][j][k][0][0] - tmp1 * njac[i + 1][j][k][0][0] - tmp1 * dx1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i + 1][j][k][0][1] - tmp1 * njac[i + 1][j][k][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i + 1][j][k][0][2] - tmp1 * njac[i + 1][j][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i + 1][j][k][0][3] - tmp1 * njac[i + 1][j][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i + 1][j][k][0][4] - tmp1 * njac[i + 1][j][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i + 1][j][k][1][0] - tmp1 * njac[i + 1][j][k][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i + 1][j][k][1][1] - tmp1 * njac[i + 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i + 1][j][k][1][2] - tmp1 * njac[i + 1][j][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i + 1][j][k][1][3] - tmp1 * njac[i + 1][j][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i + 1][j][k][1][4] - tmp1 * njac[i + 1][j][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i + 1][j][k][2][0] - tmp1 * njac[i + 1][j][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i + 1][j][k][2][1] - tmp1 * njac[i + 1][j][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i + 1][j][k][2][2] - tmp1 * njac[i + 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i + 1][j][k][2][3] - tmp1 * njac[i + 1][j][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i + 1][j][k][2][4] - tmp1 * njac[i + 1][j][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i + 1][j][k][3][0] - tmp1 * njac[i + 1][j][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i + 1][j][k][3][1] - tmp1 * njac[i + 1][j][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i + 1][j][k][3][2] - tmp1 * njac[i + 1][j][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i + 1][j][k][3][3] - tmp1 * njac[i + 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i + 1][j][k][3][4] - tmp1 * njac[i + 1][j][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i + 1][j][k][4][0] - tmp1 * njac[i + 1][j][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i + 1][j][k][4][1] - tmp1 * njac[i + 1][j][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i + 1][j][k][4][2] - tmp1 * njac[i + 1][j][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i + 1][j][k][4][3] - tmp1 * njac[i + 1][j][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i + 1][j][k][4][4] - tmp1 * njac[i + 1][j][k][4][4] - tmp1 * dx5;\n }\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i,k)", "context_chars": 100, "text": "rallel for private (tmp1,tmp2,tmp3,i,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,i) firstprivate (c3c4,c1345,c1,c2,con43)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n/*--------------------------------------------------------------------\nc \nc-------------------------------------------------------------------*/\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 1.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = (2.0 - c2) * (u[i][j][k][1] / u[i][j][k][0]);\n fjac[i][j][k][1][2] = -c2 * (u[i][j][k][2] * tmp1);\n fjac[i][j][k][1][3] = -c2 * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][1][4] = c2;\n fjac[i][j][k][2][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][3] = 0.0;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][2] = 0.0;\n fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][1] * tmp1);\n fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * (3.0 * u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][3] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][4] = c1 * (u[i][j][k][1] * tmp1);\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -con43 * c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = con43 * c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n/*--------------------------------------------------------------------\nc now jacobians set, so form left hand side in x 
direction\nc-------------------------------------------------------------------*/\n \n#pragma omp parallel for private (tmp1,tmp2,i) firstprivate (tx1,tx2,dx1,dx2,dx3,dx4,dx5,dt)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n tmp1 = dt * tx1;\n tmp2 = dt * tx2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i - 1][j][k][0][0] - tmp1 * njac[i - 1][j][k][0][0] - tmp1 * dx1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i - 1][j][k][0][1] - tmp1 * njac[i - 1][j][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i - 1][j][k][0][2] - tmp1 * njac[i - 1][j][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i - 1][j][k][0][3] - tmp1 * njac[i - 1][j][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i - 1][j][k][0][4] - tmp1 * njac[i - 1][j][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i - 1][j][k][1][0] - tmp1 * njac[i - 1][j][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i - 1][j][k][1][1] - tmp1 * njac[i - 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i - 1][j][k][1][2] - tmp1 * njac[i - 1][j][k][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i - 1][j][k][1][3] - tmp1 * njac[i - 1][j][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i - 1][j][k][1][4] - tmp1 * njac[i - 1][j][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i - 1][j][k][2][0] - tmp1 * njac[i - 1][j][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i - 1][j][k][2][1] - tmp1 * njac[i - 1][j][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i - 1][j][k][2][2] - tmp1 * njac[i - 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i - 1][j][k][2][3] - tmp1 * njac[i - 1][j][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i - 1][j][k][2][4] - tmp1 * njac[i - 1][j][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i - 1][j][k][3][0] - tmp1 * njac[i - 1][j][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i - 1][j][k][3][1] - tmp1 * njac[i - 1][j][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i - 1][j][k][3][2] - tmp1 * njac[i - 1][j][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i - 1][j][k][3][3] - tmp1 * njac[i - 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i - 1][j][k][3][4] - tmp1 * njac[i - 1][j][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i - 1][j][k][4][0] - tmp1 * njac[i - 1][j][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i - 1][j][k][4][1] - tmp1 * njac[i - 1][j][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i - 1][j][k][4][2] - tmp1 * njac[i - 1][j][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i - 1][j][k][4][3] - tmp1 * njac[i - 1][j][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i - 1][j][k][4][4] - tmp1 * njac[i - 1][j][k][4][4] - tmp1 * dx5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n 
lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i + 1][j][k][0][0] - tmp1 * njac[i + 1][j][k][0][0] - tmp1 * dx1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i + 1][j][k][0][1] - tmp1 * njac[i + 1][j][k][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i + 1][j][k][0][2] - tmp1 * njac[i + 1][j][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i + 1][j][k][0][3] - tmp1 * njac[i + 1][j][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i + 1][j][k][0][4] - tmp1 * njac[i + 1][j][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i + 1][j][k][1][0] - tmp1 * njac[i + 1][j][k][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i + 1][j][k][1][1] - tmp1 * njac[i + 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i + 1][j][k][1][2] - tmp1 * njac[i + 1][j][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i + 1][j][k][1][3] - tmp1 * njac[i + 1][j][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i + 1][j][k][1][4] - tmp1 * njac[i + 1][j][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i + 1][j][k][2][0] - tmp1 * njac[i + 1][j][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i + 1][j][k][2][1] - tmp1 * njac[i + 1][j][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i + 1][j][k][2][2] - tmp1 * njac[i + 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i + 1][j][k][2][3] - tmp1 * njac[i + 1][j][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i + 1][j][k][2][4] - tmp1 * njac[i + 1][j][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i + 1][j][k][3][0] - tmp1 * njac[i + 1][j][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i + 1][j][k][3][1] - tmp1 * njac[i + 1][j][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i + 1][j][k][3][2] - tmp1 * njac[i + 1][j][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i + 1][j][k][3][3] - tmp1 * njac[i + 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i + 1][j][k][3][4] - tmp1 * njac[i + 1][j][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i + 1][j][k][4][0] - tmp1 * njac[i + 1][j][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i + 1][j][k][4][1] - tmp1 * njac[i + 1][j][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i + 1][j][k][4][2] - tmp1 * njac[i + 1][j][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i + 1][j][k][4][3] - tmp1 * njac[i + 1][j][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i + 1][j][k][4][4] - tmp1 * njac[i + 1][j][k][4][4] - tmp1 * dx5;\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i) firstprivate (c3c4,c1345,c1,c2,con43)", "context_chars": 100, "text": "llel for private (tmp1,tmp2,tmp3,i,k)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n tmp1 = 1.0 / 
u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n/*--------------------------------------------------------------------\nc \nc-------------------------------------------------------------------*/\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 1.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = (2.0 - c2) * (u[i][j][k][1] / u[i][j][k][0]);\n fjac[i][j][k][1][2] = -c2 * (u[i][j][k][2] * tmp1);\n fjac[i][j][k][1][3] = -c2 * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][1][4] = c2;\n fjac[i][j][k][2][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][3] = 0.0;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][2] = 0.0;\n fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][1] * tmp1);\n fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * (3.0 * u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][3] * u[i][j][k][1]) * tmp2;\n fjac[i][j][k][4][4] = c1 * (u[i][j][k][1] * tmp1);\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -con43 * c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = con43 * c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i) firstprivate (c3c4,c1345,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,i) firstprivate (tx1,tx2,dx1,dx2,dx3,dx4,dx5,dt)", "context_chars": 100, "text": "d side in x direction\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n tmp1 = dt * tx1;\n tmp2 = dt * tx2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i - 1][j][k][0][0] - tmp1 * njac[i - 1][j][k][0][0] - tmp1 
* dx1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i - 1][j][k][0][1] - tmp1 * njac[i - 1][j][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i - 1][j][k][0][2] - tmp1 * njac[i - 1][j][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i - 1][j][k][0][3] - tmp1 * njac[i - 1][j][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i - 1][j][k][0][4] - tmp1 * njac[i - 1][j][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i - 1][j][k][1][0] - tmp1 * njac[i - 1][j][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i - 1][j][k][1][1] - tmp1 * njac[i - 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i - 1][j][k][1][2] - tmp1 * njac[i - 1][j][k][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i - 1][j][k][1][3] - tmp1 * njac[i - 1][j][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i - 1][j][k][1][4] - tmp1 * njac[i - 1][j][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i - 1][j][k][2][0] - tmp1 * njac[i - 1][j][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i - 1][j][k][2][1] - tmp1 * njac[i - 1][j][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i - 1][j][k][2][2] - tmp1 * njac[i - 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i - 1][j][k][2][3] - tmp1 * njac[i - 1][j][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i - 1][j][k][2][4] - tmp1 * njac[i - 1][j][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i - 1][j][k][3][0] - tmp1 * njac[i - 1][j][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i - 1][j][k][3][1] - tmp1 * njac[i - 1][j][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i - 1][j][k][3][2] - tmp1 * njac[i - 1][j][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i - 1][j][k][3][3] - tmp1 * njac[i - 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i - 1][j][k][3][4] - tmp1 * njac[i - 1][j][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i - 1][j][k][4][0] - tmp1 * njac[i - 1][j][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i - 1][j][k][4][1] - tmp1 * njac[i - 1][j][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i - 1][j][k][4][2] - tmp1 * njac[i - 1][j][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i - 1][j][k][4][3] - tmp1 * njac[i - 1][j][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i - 1][j][k][4][4] - tmp1 * njac[i - 1][j][k][4][4] - tmp1 * dx5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * 
njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i + 1][j][k][0][0] - tmp1 * njac[i + 1][j][k][0][0] - tmp1 * dx1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i + 1][j][k][0][1] - tmp1 * njac[i + 1][j][k][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i + 1][j][k][0][2] - tmp1 * njac[i + 1][j][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i + 1][j][k][0][3] - tmp1 * njac[i + 1][j][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i + 1][j][k][0][4] - tmp1 * njac[i + 1][j][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i + 1][j][k][1][0] - tmp1 * njac[i + 1][j][k][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i + 1][j][k][1][1] - tmp1 * njac[i + 1][j][k][1][1] - tmp1 * dx2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i + 1][j][k][1][2] - tmp1 * njac[i + 1][j][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i + 1][j][k][1][3] - tmp1 * njac[i + 1][j][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i + 1][j][k][1][4] - tmp1 * njac[i + 1][j][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i + 1][j][k][2][0] - tmp1 * njac[i + 1][j][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i + 1][j][k][2][1] - tmp1 * njac[i + 1][j][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i + 1][j][k][2][2] - tmp1 * njac[i + 1][j][k][2][2] - tmp1 * dx3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i + 1][j][k][2][3] - tmp1 * njac[i + 1][j][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i + 1][j][k][2][4] - tmp1 * njac[i + 1][j][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i + 1][j][k][3][0] - tmp1 * njac[i + 1][j][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i + 1][j][k][3][1] - tmp1 * njac[i + 1][j][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i + 1][j][k][3][2] - tmp1 * njac[i + 1][j][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i + 1][j][k][3][3] - tmp1 * njac[i + 1][j][k][3][3] - tmp1 * dx4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i + 1][j][k][3][4] - tmp1 * njac[i + 1][j][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i + 1][j][k][4][0] - tmp1 * njac[i + 1][j][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i + 1][j][k][4][1] - tmp1 * njac[i + 1][j][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i + 1][j][k][4][2] - tmp1 * njac[i + 1][j][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i + 1][j][k][4][3] - tmp1 * njac[i + 1][j][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i + 1][j][k][4][4] - tmp1 * njac[i + 1][j][k][4][4] - tmp1 * dx5;\n } #pragma omp parallel for private (tmp1,tmp2,i) firstprivate (tx1,tx2,dx1,dx2,dx3,dx4,dx5,dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)", "context_chars": 100, "text": "nd n jacobians for cell c\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n 
fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 1.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][3] = 0.0;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][2] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][2][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][2] = (2.0 - c2) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][3] = -c2 * u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][4] = c2;\n fjac[i][j][k][3][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = 0.0;\n fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * u[i][j][k][4] * tmp1) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][4][1] = -c2 * u[i][j][k][1] * u[i][j][k][2] * tmp2;\n fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + 3.0 * u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -con43 * c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = con43 * c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)", "context_chars": 100, "text": "rallel for private (tmp1,tmp2,tmp3,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 1.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][1][1] = 
u[i][j][k][2] * tmp1;\n fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][3] = 0.0;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][2] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][2][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][2] = (2.0 - c2) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][3] = -c2 * u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][4] = c2;\n fjac[i][j][k][3][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = 0.0;\n fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * u[i][j][k][4] * tmp1) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][4][1] = -c2 * u[i][j][k][1] * u[i][j][k][2] * tmp2;\n fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + 3.0 * u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -con43 * c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = con43 * c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,con43)", "context_chars": 100, "text": "parallel for private (tmp1,tmp2,tmp3,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 1.0;\n fjac[i][j][k][0][3] = 0.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][3] = 0.0;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][2] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n 
fjac[i][j][k][2][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][2][2] = (2.0 - c2) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][3] = -c2 * u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][4] = c2;\n fjac[i][j][k][3][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][3][1] = 0.0;\n fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][4] = 0.0;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * u[i][j][k][4] * tmp1) * u[i][j][k][2] * tmp1;\n fjac[i][j][k][4][1] = -c2 * u[i][j][k][1] * u[i][j][k][2] * tmp2;\n fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + 3.0 * u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][3] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -con43 * c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = con43 * c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = c3c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,i,j,k)", "context_chars": 100, "text": " hand side in y direction\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (ty1,ty2,dy1,dy2,dy3,dy4,dy5,dt)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * ty1;\n tmp2 = dt * ty2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j - 1][k][0][0] - tmp1 * njac[i][j - 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j - 1][k][0][1] - tmp1 * njac[i][j - 1][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j - 1][k][0][2] - tmp1 * njac[i][j - 1][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j - 1][k][0][3] - tmp1 * njac[i][j - 1][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j - 1][k][0][4] - tmp1 * njac[i][j - 1][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j - 1][k][1][0] - tmp1 * njac[i][j - 1][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j - 1][k][1][1] - tmp1 * njac[i][j - 
1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j - 1][k][1][2] - tmp1 * njac[i][j - 1][k][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j - 1][k][1][3] - tmp1 * njac[i][j - 1][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j - 1][k][1][4] - tmp1 * njac[i][j - 1][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j - 1][k][2][0] - tmp1 * njac[i][j - 1][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j - 1][k][2][1] - tmp1 * njac[i][j - 1][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j - 1][k][2][2] - tmp1 * njac[i][j - 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j - 1][k][2][3] - tmp1 * njac[i][j - 1][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j - 1][k][2][4] - tmp1 * njac[i][j - 1][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j - 1][k][3][0] - tmp1 * njac[i][j - 1][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j - 1][k][3][1] - tmp1 * njac[i][j - 1][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j - 1][k][3][2] - tmp1 * njac[i][j - 1][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j - 1][k][3][3] - tmp1 * njac[i][j - 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j - 1][k][3][4] - tmp1 * njac[i][j - 1][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j - 1][k][4][0] - tmp1 * njac[i][j - 1][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j - 1][k][4][1] - tmp1 * njac[i][j - 1][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j - 1][k][4][2] - tmp1 * njac[i][j - 1][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j - 1][k][4][3] - tmp1 * njac[i][j - 1][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j - 1][k][4][4] - tmp1 * njac[i][j - 1][k][4][4] - tmp1 * dy5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j + 1][k][0][0] - tmp1 * njac[i][j + 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j + 1][k][0][1] - tmp1 * njac[i][j + 1][k][0][1];\n 
lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j + 1][k][0][2] - tmp1 * njac[i][j + 1][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j + 1][k][0][3] - tmp1 * njac[i][j + 1][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j + 1][k][0][4] - tmp1 * njac[i][j + 1][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j + 1][k][1][0] - tmp1 * njac[i][j + 1][k][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j + 1][k][1][1] - tmp1 * njac[i][j + 1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j + 1][k][1][2] - tmp1 * njac[i][j + 1][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j + 1][k][1][3] - tmp1 * njac[i][j + 1][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j + 1][k][1][4] - tmp1 * njac[i][j + 1][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j + 1][k][2][0] - tmp1 * njac[i][j + 1][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j + 1][k][2][1] - tmp1 * njac[i][j + 1][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j + 1][k][2][2] - tmp1 * njac[i][j + 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j + 1][k][2][3] - tmp1 * njac[i][j + 1][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j + 1][k][2][4] - tmp1 * njac[i][j + 1][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j + 1][k][3][0] - tmp1 * njac[i][j + 1][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j + 1][k][3][1] - tmp1 * njac[i][j + 1][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j + 1][k][3][2] - tmp1 * njac[i][j + 1][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j + 1][k][3][3] - tmp1 * njac[i][j + 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j + 1][k][3][4] - tmp1 * njac[i][j + 1][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j + 1][k][4][0] - tmp1 * njac[i][j + 1][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j + 1][k][4][1] - tmp1 * njac[i][j + 1][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j + 1][k][4][2] - tmp1 * njac[i][j + 1][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j + 1][k][4][3] - tmp1 * njac[i][j + 1][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j + 1][k][4][4] - tmp1 * njac[i][j + 1][k][4][4] - tmp1 * dy5;\n }\n }\n } #pragma omp parallel for private (tmp1,tmp2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,j,k)", "context_chars": 100, "text": "mp parallel for private (tmp1,tmp2,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (ty1,ty2,dy1,dy2,dy3,dy4,dy5,dt)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * ty1;\n tmp2 = dt * ty2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j - 1][k][0][0] - tmp1 * njac[i][j - 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j - 1][k][0][1] - tmp1 * njac[i][j - 1][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j - 1][k][0][2] - tmp1 * njac[i][j - 1][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j - 1][k][0][3] - tmp1 * njac[i][j - 1][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j - 1][k][0][4] - tmp1 * njac[i][j - 1][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j - 1][k][1][0] - tmp1 * njac[i][j - 1][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j - 1][k][1][1] - tmp1 * njac[i][j - 1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j - 1][k][1][2] - tmp1 * njac[i][j - 1][k][1][2];\n 
lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j - 1][k][1][3] - tmp1 * njac[i][j - 1][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j - 1][k][1][4] - tmp1 * njac[i][j - 1][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j - 1][k][2][0] - tmp1 * njac[i][j - 1][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j - 1][k][2][1] - tmp1 * njac[i][j - 1][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j - 1][k][2][2] - tmp1 * njac[i][j - 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j - 1][k][2][3] - tmp1 * njac[i][j - 1][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j - 1][k][2][4] - tmp1 * njac[i][j - 1][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j - 1][k][3][0] - tmp1 * njac[i][j - 1][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j - 1][k][3][1] - tmp1 * njac[i][j - 1][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j - 1][k][3][2] - tmp1 * njac[i][j - 1][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j - 1][k][3][3] - tmp1 * njac[i][j - 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j - 1][k][3][4] - tmp1 * njac[i][j - 1][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j - 1][k][4][0] - tmp1 * njac[i][j - 1][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j - 1][k][4][1] - tmp1 * njac[i][j - 1][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j - 1][k][4][2] - tmp1 * njac[i][j - 1][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j - 1][k][4][3] - tmp1 * njac[i][j - 1][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j - 1][k][4][4] - tmp1 * njac[i][j - 1][k][4][4] - tmp1 * dy5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j + 1][k][0][0] - tmp1 * njac[i][j + 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j + 1][k][0][1] - tmp1 * njac[i][j + 1][k][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j + 1][k][0][2] - tmp1 * njac[i][j + 1][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j 
+ 1][k][0][3] - tmp1 * njac[i][j + 1][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j + 1][k][0][4] - tmp1 * njac[i][j + 1][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j + 1][k][1][0] - tmp1 * njac[i][j + 1][k][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j + 1][k][1][1] - tmp1 * njac[i][j + 1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j + 1][k][1][2] - tmp1 * njac[i][j + 1][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j + 1][k][1][3] - tmp1 * njac[i][j + 1][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j + 1][k][1][4] - tmp1 * njac[i][j + 1][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j + 1][k][2][0] - tmp1 * njac[i][j + 1][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j + 1][k][2][1] - tmp1 * njac[i][j + 1][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j + 1][k][2][2] - tmp1 * njac[i][j + 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j + 1][k][2][3] - tmp1 * njac[i][j + 1][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j + 1][k][2][4] - tmp1 * njac[i][j + 1][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j + 1][k][3][0] - tmp1 * njac[i][j + 1][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j + 1][k][3][1] - tmp1 * njac[i][j + 1][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j + 1][k][3][2] - tmp1 * njac[i][j + 1][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j + 1][k][3][3] - tmp1 * njac[i][j + 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j + 1][k][3][4] - tmp1 * njac[i][j + 1][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j + 1][k][4][0] - tmp1 * njac[i][j + 1][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j + 1][k][4][1] - tmp1 * njac[i][j + 1][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j + 1][k][4][2] - tmp1 * njac[i][j + 1][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j + 1][k][4][3] - tmp1 * njac[i][j + 1][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j + 1][k][4][4] - tmp1 * njac[i][j + 1][k][4][4] - tmp1 * dy5;\n }\n } #pragma omp parallel for private (tmp1,tmp2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (ty1,ty2,dy1,dy2,dy3,dy4,dy5,dt)", "context_chars": 100, "text": " parallel for private (tmp1,tmp2,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * ty1;\n tmp2 = dt * ty2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j - 1][k][0][0] - tmp1 * njac[i][j - 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j - 1][k][0][1] - tmp1 * njac[i][j - 1][k][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j - 1][k][0][2] - tmp1 * njac[i][j - 1][k][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j - 1][k][0][3] - tmp1 * njac[i][j - 1][k][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j - 1][k][0][4] - tmp1 * njac[i][j - 1][k][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j - 1][k][1][0] - tmp1 * njac[i][j - 1][k][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j - 1][k][1][1] - tmp1 * njac[i][j - 1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j - 1][k][1][2] - tmp1 * njac[i][j - 1][k][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j - 1][k][1][3] - tmp1 * njac[i][j - 1][k][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j - 1][k][1][4] - tmp1 * njac[i][j - 1][k][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j - 1][k][2][0] - tmp1 * 
njac[i][j - 1][k][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j - 1][k][2][1] - tmp1 * njac[i][j - 1][k][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j - 1][k][2][2] - tmp1 * njac[i][j - 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j - 1][k][2][3] - tmp1 * njac[i][j - 1][k][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j - 1][k][2][4] - tmp1 * njac[i][j - 1][k][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j - 1][k][3][0] - tmp1 * njac[i][j - 1][k][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j - 1][k][3][1] - tmp1 * njac[i][j - 1][k][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j - 1][k][3][2] - tmp1 * njac[i][j - 1][k][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j - 1][k][3][3] - tmp1 * njac[i][j - 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j - 1][k][3][4] - tmp1 * njac[i][j - 1][k][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j - 1][k][4][0] - tmp1 * njac[i][j - 1][k][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j - 1][k][4][1] - tmp1 * njac[i][j - 1][k][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j - 1][k][4][2] - tmp1 * njac[i][j - 1][k][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j - 1][k][4][3] - tmp1 * njac[i][j - 1][k][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j - 1][k][4][4] - tmp1 * njac[i][j - 1][k][4][4] - tmp1 * dy5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j + 1][k][0][0] - tmp1 * njac[i][j + 1][k][0][0] - tmp1 * dy1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j + 1][k][0][1] - tmp1 * njac[i][j + 1][k][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j + 1][k][0][2] - tmp1 * njac[i][j + 1][k][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j + 1][k][0][3] - tmp1 * njac[i][j + 1][k][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j + 1][k][0][4] - tmp1 * njac[i][j + 1][k][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j + 1][k][1][0] - tmp1 * njac[i][j + 1][k][1][0];\n 
lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j + 1][k][1][1] - tmp1 * njac[i][j + 1][k][1][1] - tmp1 * dy2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j + 1][k][1][2] - tmp1 * njac[i][j + 1][k][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j + 1][k][1][3] - tmp1 * njac[i][j + 1][k][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j + 1][k][1][4] - tmp1 * njac[i][j + 1][k][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j + 1][k][2][0] - tmp1 * njac[i][j + 1][k][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j + 1][k][2][1] - tmp1 * njac[i][j + 1][k][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j + 1][k][2][2] - tmp1 * njac[i][j + 1][k][2][2] - tmp1 * dy3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j + 1][k][2][3] - tmp1 * njac[i][j + 1][k][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j + 1][k][2][4] - tmp1 * njac[i][j + 1][k][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j + 1][k][3][0] - tmp1 * njac[i][j + 1][k][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j + 1][k][3][1] - tmp1 * njac[i][j + 1][k][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j + 1][k][3][2] - tmp1 * njac[i][j + 1][k][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j + 1][k][3][3] - tmp1 * njac[i][j + 1][k][3][3] - tmp1 * dy4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j + 1][k][3][4] - tmp1 * njac[i][j + 1][k][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j + 1][k][4][0] - tmp1 * njac[i][j + 1][k][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j + 1][k][4][1] - tmp1 * njac[i][j + 1][k][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j + 1][k][4][2] - tmp1 * njac[i][j + 1][k][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j + 1][k][4][3] - tmp1 * njac[i][j + 1][k][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j + 1][k][4][4] - tmp1 * njac[i][j + 1][k][4][4] - tmp1 * dy5;\n } #pragma omp parallel for private (tmp1,tmp2,k) firstprivate (ty1,ty2,dy1,dy2,dy3,dy4,dy5,dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(tmp1, tmp2, tmp3) ", "context_chars": 100, "text": "beled f) and s jacobians\nc---------------------------------------------------------------------*/\n//#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,c3,c4,con43)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 1.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][1][2] = 0.0;\n fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][2][1] = 0.0;\n fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][3] = (2.0 - 
c2) * u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][4] = c2;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][4][1] = -c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n }\n } #pragma omp parallel for private(tmp1, tmp2, tmp3) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)", "context_chars": 100, "text": "----------------------------------------*/\n//#pragma omp parallel for private(tmp1, tmp2, tmp3) \n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,c3,c4,con43)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 1.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][1][2] = 0.0;\n fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][2][1] = 0.0;\n fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][3] = (2.0 - c2) * u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][4] = c2;\n fjac[i][j][k][4][0] = 
(c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][4][1] = -c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)", "context_chars": 100, "text": "rallel for private (tmp1,tmp2,tmp3,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,c3,c4,con43)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 1.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][1][2] = 0.0;\n fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][2][1] = 0.0;\n fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][3] = (2.0 - c2) * u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][4] = c2;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][4][1] = 
-c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,c3,c4,con43)", "context_chars": 100, "text": "llel for private (tmp1,tmp2,tmp3,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n fjac[i][j][k][0][0] = 0.0;\n fjac[i][j][k][0][1] = 0.0;\n fjac[i][j][k][0][2] = 0.0;\n fjac[i][j][k][0][3] = 1.0;\n fjac[i][j][k][0][4] = 0.0;\n fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][1][2] = 0.0;\n fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n fjac[i][j][k][1][4] = 0.0;\n fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][2][1] = 0.0;\n fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n fjac[i][j][k][2][4] = 0.0;\n fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;\n fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;\n fjac[i][j][k][3][3] = (2.0 - c2) * u[i][j][k][3] * tmp1;\n fjac[i][j][k][3][4] = c2;\n fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);\n fjac[i][j][k][4][1] = -c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;\n fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);\n 
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n njac[i][j][k][0][0] = 0.0;\n njac[i][j][k][0][1] = 0.0;\n njac[i][j][k][0][2] = 0.0;\n njac[i][j][k][0][3] = 0.0;\n njac[i][j][k][0][4] = 0.0;\n njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];\n njac[i][j][k][1][1] = c3c4 * tmp1;\n njac[i][j][k][1][2] = 0.0;\n njac[i][j][k][1][3] = 0.0;\n njac[i][j][k][1][4] = 0.0;\n njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];\n njac[i][j][k][2][1] = 0.0;\n njac[i][j][k][2][2] = c3c4 * tmp1;\n njac[i][j][k][2][3] = 0.0;\n njac[i][j][k][2][4] = 0.0;\n njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];\n njac[i][j][k][3][1] = 0.0;\n njac[i][j][k][3][2] = 0.0;\n njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n njac[i][j][k][3][4] = 0.0;\n njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c3c4 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4];\n njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];\n njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];\n njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];\n njac[i][j][k][4][4] = c1345 * tmp1;\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,k) firstprivate (c3c4,c1345,c1,c2,c3,c4,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,i,j,k)", "context_chars": 100, "text": " hand side in z direction\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (tz1,tz2,dz1,dz2,dz3,dz4,dz5,dt)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * tz1;\n tmp2 = dt * tz2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j][k - 1][0][0] - tmp1 * njac[i][j][k - 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j][k - 1][0][1] - tmp1 * njac[i][j][k - 1][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j][k - 1][0][2] - tmp1 * njac[i][j][k - 1][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j][k - 1][0][3] - tmp1 * njac[i][j][k - 1][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j][k - 1][0][4] - tmp1 * njac[i][j][k - 1][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j][k - 1][1][0] - tmp1 * njac[i][j][k - 1][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j][k - 1][1][1] - tmp1 * njac[i][j][k - 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j][k - 1][1][2] - tmp1 * njac[i][j][k - 1][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j][k - 1][1][3] - tmp1 * njac[i][j][k - 1][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j][k - 1][1][4] - tmp1 * njac[i][j][k - 1][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j][k - 1][2][0] - tmp1 * njac[i][j][k - 1][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j][k - 1][2][1] - tmp1 * njac[i][j][k - 1][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j][k - 1][2][2] - tmp1 * njac[i][j][k - 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j][k - 1][2][3] - tmp1 * njac[i][j][k - 1][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j][k - 1][2][4] - tmp1 * njac[i][j][k - 1][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j][k - 1][3][0] - tmp1 * njac[i][j][k - 1][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * 
fjac[i][j][k - 1][3][1] - tmp1 * njac[i][j][k - 1][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j][k - 1][3][2] - tmp1 * njac[i][j][k - 1][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j][k - 1][3][3] - tmp1 * njac[i][j][k - 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j][k - 1][3][4] - tmp1 * njac[i][j][k - 1][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j][k - 1][4][0] - tmp1 * njac[i][j][k - 1][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j][k - 1][4][1] - tmp1 * njac[i][j][k - 1][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j][k - 1][4][2] - tmp1 * njac[i][j][k - 1][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j][k - 1][4][3] - tmp1 * njac[i][j][k - 1][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j][k - 1][4][4] - tmp1 * njac[i][j][k - 1][4][4] - tmp1 * dz5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j][k + 1][0][0] - tmp1 * njac[i][j][k + 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j][k + 1][0][1] - tmp1 * njac[i][j][k + 1][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j][k + 1][0][2] - tmp1 * njac[i][j][k + 1][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j][k + 1][0][3] - tmp1 * njac[i][j][k + 1][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j][k + 1][0][4] - tmp1 * njac[i][j][k + 1][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j][k + 1][1][0] - tmp1 * njac[i][j][k + 1][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j][k + 1][1][1] - tmp1 * njac[i][j][k + 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j][k + 1][1][2] - tmp1 * njac[i][j][k + 1][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j][k + 1][1][3] - tmp1 * njac[i][j][k + 1][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j][k + 1][1][4] - tmp1 * njac[i][j][k + 1][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j][k + 1][2][0] - tmp1 * njac[i][j][k + 1][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j][k + 1][2][1] - tmp1 * njac[i][j][k + 
1][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j][k + 1][2][2] - tmp1 * njac[i][j][k + 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j][k + 1][2][3] - tmp1 * njac[i][j][k + 1][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j][k + 1][2][4] - tmp1 * njac[i][j][k + 1][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j][k + 1][3][0] - tmp1 * njac[i][j][k + 1][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j][k + 1][3][1] - tmp1 * njac[i][j][k + 1][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j][k + 1][3][2] - tmp1 * njac[i][j][k + 1][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j][k + 1][3][3] - tmp1 * njac[i][j][k + 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j][k + 1][3][4] - tmp1 * njac[i][j][k + 1][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j][k + 1][4][0] - tmp1 * njac[i][j][k + 1][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j][k + 1][4][1] - tmp1 * njac[i][j][k + 1][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j][k + 1][4][2] - tmp1 * njac[i][j][k + 1][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j][k + 1][4][3] - tmp1 * njac[i][j][k + 1][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j][k + 1][4][4] - tmp1 * njac[i][j][k + 1][4][4] - tmp1 * dz5;\n }\n }\n } #pragma omp parallel for private (tmp1,tmp2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,j,k)", "context_chars": 100, "text": "mp parallel for private (tmp1,tmp2,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (tz1,tz2,dz1,dz2,dz3,dz4,dz5,dt)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * tz1;\n tmp2 = dt * tz2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j][k - 1][0][0] - tmp1 * njac[i][j][k - 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j][k - 1][0][1] - tmp1 * njac[i][j][k - 1][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j][k - 1][0][2] - tmp1 * njac[i][j][k - 1][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j][k - 1][0][3] - tmp1 * njac[i][j][k - 1][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j][k - 1][0][4] - tmp1 * njac[i][j][k - 1][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j][k - 1][1][0] - tmp1 * njac[i][j][k - 1][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j][k - 1][1][1] - tmp1 * njac[i][j][k - 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j][k - 1][1][2] - tmp1 * njac[i][j][k - 1][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j][k - 1][1][3] - tmp1 * njac[i][j][k - 1][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j][k - 1][1][4] - tmp1 * njac[i][j][k - 1][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j][k - 1][2][0] - tmp1 * njac[i][j][k - 1][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j][k - 1][2][1] - tmp1 * njac[i][j][k - 1][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j][k - 1][2][2] - tmp1 * njac[i][j][k - 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j][k - 1][2][3] - tmp1 * njac[i][j][k - 1][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j][k - 1][2][4] - tmp1 * njac[i][j][k - 1][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j][k - 1][3][0] - tmp1 * njac[i][j][k - 1][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j][k - 1][3][1] - tmp1 * njac[i][j][k - 1][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j][k - 1][3][2] - tmp1 * njac[i][j][k - 
1][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j][k - 1][3][3] - tmp1 * njac[i][j][k - 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j][k - 1][3][4] - tmp1 * njac[i][j][k - 1][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j][k - 1][4][0] - tmp1 * njac[i][j][k - 1][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j][k - 1][4][1] - tmp1 * njac[i][j][k - 1][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j][k - 1][4][2] - tmp1 * njac[i][j][k - 1][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j][k - 1][4][3] - tmp1 * njac[i][j][k - 1][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j][k - 1][4][4] - tmp1 * njac[i][j][k - 1][4][4] - tmp1 * dz5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j][k + 1][0][0] - tmp1 * njac[i][j][k + 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j][k + 1][0][1] - tmp1 * njac[i][j][k + 1][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j][k + 1][0][2] - tmp1 * njac[i][j][k + 1][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j][k + 1][0][3] - tmp1 * njac[i][j][k + 1][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j][k + 1][0][4] - tmp1 * njac[i][j][k + 1][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j][k + 1][1][0] - tmp1 * njac[i][j][k + 1][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j][k + 1][1][1] - tmp1 * njac[i][j][k + 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j][k + 1][1][2] - tmp1 * njac[i][j][k + 1][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j][k + 1][1][3] - tmp1 * njac[i][j][k + 1][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j][k + 1][1][4] - tmp1 * njac[i][j][k + 1][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j][k + 1][2][0] - tmp1 * njac[i][j][k + 1][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j][k + 1][2][1] - tmp1 * njac[i][j][k + 1][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j][k + 1][2][2] - tmp1 * njac[i][j][k + 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][2][2][3] = 
tmp2 * fjac[i][j][k + 1][2][3] - tmp1 * njac[i][j][k + 1][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j][k + 1][2][4] - tmp1 * njac[i][j][k + 1][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j][k + 1][3][0] - tmp1 * njac[i][j][k + 1][3][0];\n lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j][k + 1][3][1] - tmp1 * njac[i][j][k + 1][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j][k + 1][3][2] - tmp1 * njac[i][j][k + 1][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j][k + 1][3][3] - tmp1 * njac[i][j][k + 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j][k + 1][3][4] - tmp1 * njac[i][j][k + 1][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j][k + 1][4][0] - tmp1 * njac[i][j][k + 1][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j][k + 1][4][1] - tmp1 * njac[i][j][k + 1][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j][k + 1][4][2] - tmp1 * njac[i][j][k + 1][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j][k + 1][4][3] - tmp1 * njac[i][j][k + 1][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j][k + 1][4][4] - tmp1 * njac[i][j][k + 1][4][4] - tmp1 * dz5;\n }\n } #pragma omp parallel for private (tmp1,tmp2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,k) firstprivate (tz1,tz2,dz1,dz2,dz3,dz4,dz5,dt)", "context_chars": 100, "text": " parallel for private (tmp1,tmp2,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n tmp1 = dt * tz1;\n tmp2 = dt * tz2;\n lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j][k - 1][0][0] - tmp1 * njac[i][j][k - 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j][k - 1][0][1] - tmp1 * njac[i][j][k - 1][0][1];\n lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j][k - 1][0][2] - tmp1 * njac[i][j][k - 1][0][2];\n lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j][k - 1][0][3] - tmp1 * njac[i][j][k - 1][0][3];\n lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j][k - 1][0][4] - tmp1 * njac[i][j][k - 1][0][4];\n lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j][k - 1][1][0] - tmp1 * njac[i][j][k - 1][1][0];\n lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j][k - 1][1][1] - tmp1 * njac[i][j][k - 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j][k - 1][1][2] - tmp1 * njac[i][j][k - 1][1][2];\n lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j][k - 1][1][3] - tmp1 * njac[i][j][k - 1][1][3];\n lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j][k - 1][1][4] - tmp1 * njac[i][j][k - 1][1][4];\n lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j][k - 1][2][0] - tmp1 * njac[i][j][k - 1][2][0];\n lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j][k - 1][2][1] - tmp1 * njac[i][j][k - 1][2][1];\n lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j][k - 1][2][2] - tmp1 * njac[i][j][k - 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j][k - 1][2][3] - tmp1 * njac[i][j][k - 1][2][3];\n lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j][k - 1][2][4] - tmp1 * njac[i][j][k - 1][2][4];\n lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j][k - 1][3][0] - tmp1 * njac[i][j][k - 1][3][0];\n lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j][k - 1][3][1] - tmp1 * njac[i][j][k - 1][3][1];\n lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j][k - 1][3][2] - tmp1 * njac[i][j][k - 1][3][2];\n lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j][k - 1][3][3] - tmp1 * njac[i][j][k - 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j][k - 1][3][4] - tmp1 * njac[i][j][k - 1][3][4];\n lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j][k - 
1][4][0] - tmp1 * njac[i][j][k - 1][4][0];\n lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j][k - 1][4][1] - tmp1 * njac[i][j][k - 1][4][1];\n lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j][k - 1][4][2] - tmp1 * njac[i][j][k - 1][4][2];\n lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j][k - 1][4][3] - tmp1 * njac[i][j][k - 1][4][3];\n lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j][k - 1][4][4] - tmp1 * njac[i][j][k - 1][4][4] - tmp1 * dz5;\n lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;\n lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;\n lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;\n lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;\n lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;\n lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j][k + 1][0][0] - tmp1 * njac[i][j][k + 1][0][0] - tmp1 * dz1;\n lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j][k + 1][0][1] - tmp1 * njac[i][j][k + 1][0][1];\n lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j][k + 1][0][2] - tmp1 * njac[i][j][k + 1][0][2];\n lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j][k + 1][0][3] - tmp1 * njac[i][j][k + 1][0][3];\n lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j][k + 1][0][4] - tmp1 * njac[i][j][k + 1][0][4];\n lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j][k + 1][1][0] - tmp1 * njac[i][j][k + 1][1][0];\n lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j][k + 1][1][1] - tmp1 * njac[i][j][k + 1][1][1] - tmp1 * dz2;\n lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j][k + 1][1][2] - tmp1 * njac[i][j][k + 1][1][2];\n lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j][k + 1][1][3] - tmp1 * njac[i][j][k + 1][1][3];\n lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j][k + 1][1][4] - tmp1 * njac[i][j][k + 1][1][4];\n lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j][k + 1][2][0] - tmp1 * njac[i][j][k + 1][2][0];\n lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j][k + 1][2][1] - tmp1 * njac[i][j][k + 1][2][1];\n lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j][k + 1][2][2] - tmp1 * njac[i][j][k + 1][2][2] - tmp1 * dz3;\n lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j][k + 1][2][3] - tmp1 * njac[i][j][k + 1][2][3];\n lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j][k + 1][2][4] - tmp1 * njac[i][j][k + 1][2][4];\n lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j][k + 1][3][0] - tmp1 * njac[i][j][k + 1][3][0];\n 
lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j][k + 1][3][1] - tmp1 * njac[i][j][k + 1][3][1];\n lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j][k + 1][3][2] - tmp1 * njac[i][j][k + 1][3][2];\n lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j][k + 1][3][3] - tmp1 * njac[i][j][k + 1][3][3] - tmp1 * dz4;\n lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j][k + 1][3][4] - tmp1 * njac[i][j][k + 1][3][4];\n lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j][k + 1][4][0] - tmp1 * njac[i][j][k + 1][4][0];\n lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j][k + 1][4][1] - tmp1 * njac[i][j][k + 1][4][1];\n lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j][k + 1][4][2] - tmp1 * njac[i][j][k + 1][4][2];\n lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j][k + 1][4][3] - tmp1 * njac[i][j][k + 1][4][3];\n lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j][k + 1][4][4] - tmp1 * njac[i][j][k + 1][4][4] - tmp1 * dz5;\n } #pragma omp parallel for private (tmp1,tmp2,k) firstprivate (tz1,tz2,dz1,dz2,dz3,dz4,dz5,dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (rho_inv,i,j,k)", "context_chars": 100, "text": " and the speed of sound.\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (rho_inv,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (rho_inv,k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n rho_inv = 1.0 / u[i][j][k][0];\n rho_i[i][j][k] = rho_inv;\n us[i][j][k] = u[i][j][k][1] * rho_inv;\n vs[i][j][k] = u[i][j][k][2] * rho_inv;\n ws[i][j][k] = u[i][j][k][3] * rho_inv;\n square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;\n qs[i][j][k] = square[i][j][k] * rho_inv;\n }\n }\n } #pragma omp parallel for private (rho_inv,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (rho_inv,j,k)", "context_chars": 100, "text": "agma omp parallel for private (rho_inv,i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (rho_inv,k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n rho_inv = 1.0 / u[i][j][k][0];\n rho_i[i][j][k] = rho_inv;\n us[i][j][k] = u[i][j][k][1] * rho_inv;\n vs[i][j][k] = u[i][j][k][2] * rho_inv;\n ws[i][j][k] = u[i][j][k][3] * rho_inv;\n square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;\n qs[i][j][k] = square[i][j][k] * rho_inv;\n }\n } #pragma omp parallel for private (rho_inv,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (rho_inv,k)", "context_chars": 100, "text": "ma omp parallel for private (rho_inv,j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n rho_inv = 1.0 / u[i][j][k][0];\n rho_i[i][j][k] = rho_inv;\n us[i][j][k] = u[i][j][k][1] * rho_inv;\n vs[i][j][k] = u[i][j][k][2] * rho_inv;\n ws[i][j][k] = u[i][j][k][3] * rho_inv;\n square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;\n qs[i][j][k] = 
square[i][j][k] * rho_inv;\n } #pragma omp parallel for private (rho_inv,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "undary \nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = forcing[i][j][k][m];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = forcing[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = forcing[i][j][k][m];\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = forcing[i][j][k][m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,i,j,k)", "context_chars": 100, "text": "pute xi-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 1][j][k];\n um1 = us[i - 1][j][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 
1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);\n }\n }\n } #pragma omp parallel for private (uijk,up1,um1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,j,k)", "context_chars": 100, "text": "parallel for private (uijk,up1,um1,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 1][j][k];\n um1 = us[i - 1][j][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);\n }\n } #pragma omp parallel for private (uijk,up1,um1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,k) firstprivate 
(tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (uijk,up1,um1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 1][j][k];\n um1 = us[i - 1][j][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);\n } #pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "n \nc-------------------------------------------------------------------*/\n i = 1;\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private 
(k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n }\n i = 2;\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n }\n \nfor (i = 3; i <= grid_points[0] - 3 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (i = 3; i <= grid_points[0] - 3 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp 
parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "agma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": "ma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n }\n }\n i = grid_points[0] - 3;\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i 
+ 1][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m]);\n }\n }\n }\n i = grid_points[0] - 2;\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4. * u[i - 1][j][k][m] + 5.0 * u[i][j][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,i)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4. * u[i - 1][j][k][m] + 5.0 * u[i][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4. 
* u[i - 1][j][k][m] + 5.0 * u[i][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,i,j,k)", "context_chars": 100, "text": "ute eta-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);\n }\n }\n } #pragma omp parallel for private (vijk,vp1,vm1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,j,k)", "context_chars": 100, "text": "parallel for private (vijk,vp1,vm1,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 
* con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);\n }\n } #pragma omp parallel for private (vijk,vp1,vm1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (vijk,vp1,vm1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);\n } #pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "ipation \nc-------------------------------------------------------------------*/\n j = 1;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] 
- 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": " (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n }\n j = 2;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n }\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 3; j <= grid_points[1] - 3 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 3; j <= grid_points[1] - 3 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "agma omp parallel for private (j,k,m)\n for (j = 3; j <= grid_points[1] - 3 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": "ma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": " * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n }\n }\n j = grid_points[1] - 3;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 
1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);\n }\n }\n }\n j = grid_points[1] - 2;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4. * u[i][j - 1][k][m] + 5. * u[i][j][k][m]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,j)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4. * u[i][j - 1][k][m] + 5. * u[i][j][k][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4. * u[i][j - 1][k][m] + 5. 
* u[i][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (wijk,wp1,wm1,i,j,k)", "context_chars": 100, "text": "te zeta-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (wijk,wp1,wm1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (wijk,wp1,wm1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);\n }\n }\n } #pragma omp parallel for private (wijk,wp1,wm1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (wijk,wp1,wm1,j,k)", "context_chars": 100, "text": "parallel for private (wijk,wp1,wm1,i,j,k)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (wijk,wp1,wm1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 
* vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);\n }\n } #pragma omp parallel for private (wijk,wp1,wm1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (wijk,wp1,wm1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (wijk,wp1,wm1,j,k)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);\n rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);\n rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);\n rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);\n rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);\n } #pragma omp parallel for private (wijk,wp1,wm1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n k = 1;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma 
omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": " (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n }\n k = 2;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (- 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n }\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= grid_points[2] - 3 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= grid_points[2] - 3 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "agma omp parallel for private (j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 3; k <= grid_points[2] - 3 - 1; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": "ma omp parallel for private (k,m)\n for (k = 3; k <= grid_points[2] - 3 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": " * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n }\n }\n k = grid_points[2] - 3;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 
1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": "m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);\n }\n }\n }\n k = grid_points[2] - 2;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * u[i][j][k][m]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,m)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dssp,k)\n for (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * u[i][j][k][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (j,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * u[i][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * 
u[i][j][k][m]);\n }\n }\n }\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (i,k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i) firstprivate (dt)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "pragma omp parallel for private (i,j,k,m)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i) firstprivate (dt)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,m)", "context_chars": 100, "text": "agma omp parallel for private (i,k,m)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i) firstprivate (dt)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n }\n } #pragma omp parallel for private (i,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (dt)", "context_chars": 100, "text": " += 1) {\n \n#pragma omp parallel for private (i,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] * dt;\n } #pragma omp parallel for private (i) firstprivate (dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "-----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n rhs_norm(xcr);\n \nfor (m = 0; m <= 4; m += 1) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " for (m = 0; m <= 4; m += 1) {\n xcr[m] = xcr[m] / dt;\n }\n *class = 'U';\n *verified = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m,n)", "context_chars": 100, "text": " int i;\n int j;\n int k;\n int m;\n int n;\n for (i = grid_points[0] - 2; i >= 0; i += -1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \n#pragma omp parallel for private (k,m,n)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m 
<= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i + 1][j][k][n];\n }\n }\n }\n } #pragma omp parallel for private (j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m,n)", "context_chars": 100, "text": "ma omp parallel for private (j,k,m,n)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i + 1][j][k][n];\n }\n }\n } #pragma omp parallel for private (k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,n)", "context_chars": 100, "text": " omp parallel for private (k,m,n)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i + 1][j][k][n];\n }\n } #pragma omp parallel for private (m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ec - ablock*avec\nc-------------------------------------------------------------------*/\n int i;\n \nfor (i = 0; i <= 4; i += 1) {\n/*--------------------------------------------------------------------\nc rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) \nc $ - lhs[i,1,ablock,ia,ja,ka,acell)*\nc-------------------------------------------------------------------*/\n bvec[i] = bvec[i] - ablock[i][0] * avec[0] - ablock[i][1] * avec[1] - ablock[i][2] * avec[2] - ablock[i][3] * avec[3] - ablock[i][4] * avec[4];\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "k) from c(i,j,k)\nc-------------------------------------------------------------------*/\n int j;\n \nfor (j = 0; j <= 4; j += 1) {\n cblock[0][j] = cblock[0][j] - ablock[0][0] * bblock[0][j] - ablock[0][1] * bblock[1][j] - ablock[0][2] * bblock[2][j] - ablock[0][3] * bblock[3][j] - ablock[0][4] * bblock[4][j];\n cblock[1][j] = cblock[1][j] - ablock[1][0] * bblock[0][j] - ablock[1][1] * bblock[1][j] - ablock[1][2] * bblock[2][j] - ablock[1][3] * bblock[3][j] - ablock[1][4] * bblock[4][j];\n cblock[2][j] = cblock[2][j] - ablock[2][0] * bblock[0][j] - ablock[2][1] * bblock[1][j] - ablock[2][2] * bblock[2][j] - ablock[2][3] * bblock[3][j] - ablock[2][4] * bblock[4][j];\n cblock[3][j] = cblock[3][j] - ablock[3][0] * bblock[0][j] - ablock[3][1] * bblock[1][j] - ablock[3][2] * bblock[2][j] - ablock[3][3] * bblock[3][j] - ablock[3][4] * bblock[4][j];\n cblock[4][j] = cblock[4][j] - ablock[4][0] * bblock[0][j] - ablock[4][1] * bblock[1][j] - ablock[4][2] * bblock[2][j] - ablock[4][3] * bblock[3][j] - ablock[4][4] * bblock[4][j];\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": 
"#pragma omp parallel for private (i,k,m,n)", "context_chars": 100, "text": " int i;\n int j;\n int k;\n int m;\n int n;\n for (j = grid_points[1] - 2; j >= 0; j += -1) {\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (k,m,n)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j + 1][k][n];\n }\n }\n }\n } #pragma omp parallel for private (i,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (k,m,n)", "context_chars": 100, "text": "ma omp parallel for private (i,k,m,n)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j + 1][k][n];\n }\n }\n } #pragma omp parallel for private (k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,n)", "context_chars": 100, "text": " omp parallel for private (k,m,n)\n for (k = 1; k <= grid_points[2] - 1 - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j + 1][k][n];\n }\n } #pragma omp parallel for private (m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m,n)", "context_chars": 100, "text": "-------------------------------------------------*/\n int i;\n int j;\n int k;\n int m;\n int n;\n \nfor (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m,n)\n for (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n for (k = grid_points[2] - 2; k >= 0; k += -1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j][k + 1][n];\n }\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m,n)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k,m,n)\n for (i = 1; i <= grid_points[0] - 1 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 1 - 1; j += 1) {\n for (k = grid_points[2] - 2; k >= 0; k += -1) {\n \n#pragma omp parallel for private (m,n)\n for (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j][k + 1][n];\n }\n }\n }\n } #pragma omp parallel for private (j,k,m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private (m,n)", "context_chars": 100, "text": "<= grid_points[1] - 1 - 1; j += 1) {\n for (k = grid_points[2] - 2; k >= 0; k += -1) {\n \nfor (m = 0; m <= 4; m += 1) {\n for (n = 0; n <= 4; n += 1) {\n 
rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j][k + 1][n];\n }\n } #pragma omp parallel for private (m,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "---------------------------------*/\n{\n for (j = 1; j <= lastrow - firstrow + 1; j += 1) {\n \nfor (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n colidx[k] = colidx[k] - firstcol + 1;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "ector to (1, 1, .... 1)\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= 14001; i += 1) {\n x[i] = 1.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "gma omp parallel for private (i)\n for (i = 1; i <= 14001; i += 1) {\n x[i] = 1.0;\n }\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n q[j] = 0.0;\n z[j] = 0.0;\n r[j] = 0.0;\n p[j] = 0.0;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "----------------------------------------------*/\n norm_temp11 = 0.0;\n norm_temp12 = 0.0;\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n norm_temp11 = norm_temp11 + x[j] * z[j];\n norm_temp12 = norm_temp12 + z[j] * z[j];\n } #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (norm_temp12)", "context_chars": 100, "text": "Normalize z to obtain x\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n x[j] = norm_temp12 * z[j];\n } #pragma omp parallel for private (j) firstprivate (norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " vector to (1, 1, .... 
1)\nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= 14001; i += 1) {\n x[i] = 1.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "----------------------------------------------*/\n norm_temp11 = 0.0;\n norm_temp12 = 0.0;\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n norm_temp11 = norm_temp11 + x[j] * z[j];\n norm_temp12 = norm_temp12 + z[j] * z[j];\n } #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (norm_temp12)", "context_chars": 100, "text": "Normalize z to obtain x\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n x[j] = norm_temp12 * z[j];\n } #pragma omp parallel for private (j) firstprivate (norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (naa)", "context_chars": 100, "text": "ize the CG algorithm:\nc-------------------------------------------------------------------*/\n{\n \nfor (j = 1; j <= naa + 1; j += 1) {\n q[j] = 0.0;\n z[j] = 0.0;\n r[j] = x[j];\n p[j] = r[j];\n//w[j] = 0.0;\n } #pragma omp parallel for private (j) firstprivate (naa)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:rho)", "context_chars": 100, "text": "f r elements locally...\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n rho = rho + r[j] * r[j];\n } #pragma omp parallel for private (j) reduction (+:rho)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (sum,j,k)", "context_chars": 100, "text": " on the Cray t3d - overall speed of code is 1.5 times faster.\n*/\n/* rolled version */\n \nfor (j = 1; j <= lastrow - firstrow + 1; j += 1) {\n sum = 0.0;\n \n#pragma omp parallel for private (k) reduction (+:sum)\n for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n sum = sum + a[k] * p[colidx[k]];\n }\n//w[j] = sum;\n q[j] = sum;\n } #pragma omp parallel for private (sum,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (k) reduction (+:sum)", "context_chars": 100, "text": "vate (sum,j,k)\n for (j = 1; j <= lastrow - firstrow + 1; j += 1) {\n sum = 0.0;\n \nfor (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n sum = sum + a[k] * p[colidx[k]];\n } #pragma omp parallel for private (k) reduction (+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:d)", 
"context_chars": 100, "text": "-------\nc Obtain p.q\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n d = d + p[j] * q[j];\n } #pragma omp parallel for private (j) reduction (+:d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:rho) firstprivate (alpha)", "context_chars": 100, "text": " r = r - alpha*q\nc---------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n z[j] = z[j] + alpha * p[j];\n r[j] = r[j] - alpha * q[j];\n//\t} #pragma omp parallel for private (j) reduction (+:rho) firstprivate (alpha)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (beta)", "context_chars": 100, "text": "---\nc p = r + beta*p\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n p[j] = r[j] + beta * p[j];\n } #pragma omp parallel for private (j) firstprivate (beta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (d,j,k) firstprivate (firstrow,lastrow)", "context_chars": 100, "text": "ltiply\nc---------------------------------------------------------------------*/\n sum = 0.0;\n{\n \nfor (j = 1; j <= lastrow - firstrow + 1; j += 1) {\n d = 0.0;\n \n#pragma omp parallel for private (k) reduction (+:d)\n for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n d = d + a[k] * z[colidx[k]];\n }\n r[j] = d;\n } #pragma omp parallel for private (d,j,k) firstprivate (firstrow,lastrow)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (k) reduction (+:d)", "context_chars": 100, "text": "ate (firstrow,lastrow)\n for (j = 1; j <= lastrow - firstrow + 1; j += 1) {\n d = 0.0;\n \nfor (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n d = d + a[k] * z[colidx[k]];\n } #pragma omp parallel for private (k) reduction (+:d)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (d,j) reduction (+:sum) firstprivate (firstcol,lastcol)", "context_chars": 100, "text": "s point, r contains A.z\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= lastcol - firstcol + 1; j += 1) {\n d = x[j] - r[j];\n sum = sum + d * d;\n } #pragma omp parallel for private (d,j) reduction (+:sum) firstprivate (firstcol,lastcol)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " mark nonzero positions\nc---------------------------------------------------------------------*/\n \nfor (i = 1; i <= n; i += 1) {\n colidx[n + i] = 0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", 
"omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": "er of triples in each row\nc-------------------------------------------------------------------*/\n \nfor (j = 1; j <= n; j += 1) {\n rowstr[j] = 0;\n mark[j] = 0;\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "----------------------------------------------------*/\n for (j = 0; j <= nrows - 1; j += 1) {\n \nfor (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {\n a[k] = 0.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (n)", "context_chars": 100, "text": "dding elements\nc-------------------------------------------------------------------*/\n nza = 0;\n \nfor (i = 1; i <= n; i += 1) {\n x[i] = 0.0;\n mark[i] = 0;\n } #pragma omp parallel for private (i) firstprivate (n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "uble v[],\n/* iv[1:*] */\nint iv[],int *nzv,int i,double val)\n{\n int k;\n boolean set;\n set = 0;\n \nfor (k = 1; k <= *nzv; k += 1) {\n if (iv[k] == i) {\n v[k] = val;\n set = 1;\n }\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": "-----------------*/\n int i;\n int j;\n int m;\n double tmp;\n double tmp1;\n double tmat[5][5];\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (k,omega)\n for (m = 0; m <= 4; m += 1) {\n v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "e tmat[5][5];\n \n#pragma omp parallel for private (i,j,m)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (k,omega)\n for (m = 0; m <= 4; m += 1) {\n v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k,omega)", "context_chars": 100, "text": "i += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (m 
= 0; m <= 4; m += 1) {\n v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);\n } #pragma omp parallel for private (m) firstprivate (k,omega)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (omega)", "context_chars": 100, "text": " == 1) {\n//\t ;\n//\t}\n// }\n//#endif /* _OPENMP */\n for (j = jst; j <= jend; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]);\n } #pragma omp parallel for private (m) firstprivate (omega)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " forward elimination\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n tmat[m][0] = d[i][j][m][0];\n tmat[m][1] = d[i][j][m][1];\n tmat[m][2] = d[i][j][m][2];\n tmat[m][3] = d[i][j][m][3];\n tmat[m][4] = d[i][j][m][4];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": "-----------------*/\n int i;\n int j;\n int m;\n double tmp;\n double tmp1;\n double tmat[5][5];\n \nfor (i = iend; i >= ist; i += -1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jend; j >= jst; j += -1) {\n \n#pragma omp parallel for private (m) firstprivate (k,omega)\n for (m = 0; m <= 4; m += 1) {\n tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": " tmat[5][5];\n \n#pragma omp parallel for private (i,j,m)\n for (i = iend; i >= ist; i += -1) {\n \nfor (j = jend; j >= jst; j += -1) {\n \n#pragma omp parallel for private (m) firstprivate (k,omega)\n for (m = 0; m <= 4; m += 1) {\n tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k,omega)", "context_chars": 100, "text": "+= -1) {\n \n#pragma omp parallel for private (j,m)\n 
for (j = jend; j >= jst; j += -1) {\n \nfor (m = 0; m <= 4; m += 1) {\n tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);\n } #pragma omp parallel for private (m) firstprivate (k,omega)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (omega)", "context_chars": 100, "text": "== 1) {\n//\t;\n// }\n// }\n//#endif /* _OPENMP */\n for (j = jend; j >= jst; j += -1) {\n \nfor (m = 0; m <= 4; m += 1) {\n tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]);\n } #pragma omp parallel for private (m) firstprivate (omega)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "gonal block inversion\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n tmat[m][0] = d[i][j][m][0];\n tmat[m][1] = d[i][j][m][1];\n tmat[m][2] = d[i][j][m][2];\n tmat[m][3] = d[i][j][m][3];\n tmat[m][4] = d[i][j][m][4];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "\n double u21km1;\n double u31km1;\n double u41km1;\n double u51km1;\n dsspm = dssp;\n \nfor (i = 0; i <= nx - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= ny - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "dssp;\n \n#pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= nx - 1; i += 1) {\n \nfor (j = 0; j <= ny - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = 0.0;\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "{\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= ny - 1; j += 1) {\n \nfor (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = 0.0;\n }\n } #pragma omp parallel for private (k,m)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (iglob,jglob,xi,eta,zeta,i,j,k,m) firstprivate (nx0)", "context_chars": 100, "text": "= 0; m <= 4; m += 1) {\n frct[i][j][k][m] = 0.0;\n }\n }\n }\n }\n \nfor (i = 0; i <= nx - 1; i += 1) {\n iglob = i;\n xi = ((double )iglob) / (nx0 - 1);\n \n#pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0)\n for (j = 0; j <= ny - 1; j += 1) {\n jglob = j;\n eta = ((double )jglob) / (ny0 - 1);\n \n#pragma omp parallel for private (zeta,k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n zeta = ((double )k) / (nz - 1);\n \n#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;\n }\n }\n }\n } #pragma omp parallel for private (iglob,jglob,xi,eta,zeta,i,j,k,m) firstprivate (nx0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0)", "context_chars": 100, "text": "for (i = 0; i <= nx - 1; i += 1) {\n iglob = i;\n xi = ((double )iglob) / (nx0 - 1);\n \nfor (j = 0; j <= ny - 1; j += 1) {\n jglob = j;\n eta = ((double )jglob) / (ny0 - 1);\n \n#pragma omp parallel for private (zeta,k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n zeta = ((double )k) / (nz - 1);\n \n#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;\n }\n }\n } #pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (zeta,k,m)", "context_chars": 100, "text": "= 0; j <= ny - 1; j += 1) {\n jglob = j;\n eta = ((double )jglob) / (ny0 - 1);\n \nfor (k = 0; k <= nz - 1; k += 1) {\n zeta = ((double )k) / (nz - 1);\n \n#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta 
* zeta * zeta;\n }\n } #pragma omp parallel for private (zeta,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": ",m)\n for (k = 0; k <= nz - 1; k += 1) {\n zeta = ((double )k) / (nz - 1);\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;\n } #pragma omp parallel for private (m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2)", "context_chars": 100, "text": "---------------------------------------------------------------*/\n L1 = 0;\n L2 = nx - 1;\n \nfor (i = L1; i <= L2; i += 1) {\n \n#pragma omp parallel for private (q,u21,j,k)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u21,k)\n for (k = 1; k <= nz - 1 - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][1];\n u21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][2] = rsd[i][j][k][2] * u21;\n flux[i][j][k][3] = rsd[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n }\n } #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,j,k)", "context_chars": 100, "text": "omp parallel for private (q,u21,i,j,k) firstprivate (L2)\n for (i = L1; i <= L2; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u21,k)\n for (k = 1; k <= nz - 1 - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][1];\n u21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][2] = rsd[i][j][k][2] * u21;\n flux[i][j][k][3] = rsd[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n } #pragma omp parallel for private (q,u21,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,k)", "context_chars": 100, "text": " \n#pragma omp parallel for private (q,u21,j,k)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 1 - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][1];\n u21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - 
q);\n flux[i][j][k][2] = rsd[i][j][k][2] * u21;\n flux[i][j][k][3] = rsd[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;\n } #pragma omp parallel for private (q,u21,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m)", "context_chars": 100, "text": " flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n }\n }\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i)\n for (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21i = tmp * rsd[i][j][k][1];\n u31i = tmp * rsd[i][j][k][2];\n u41i = tmp * rsd[i][j][k][3];\n u51i = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i - 1][j][k][0];\n u21im1 = tmp * rsd[i - 1][j][k][1];\n u31im1 = tmp * rsd[i - 1][j][k][2];\n u41im1 = tmp * rsd[i - 1][j][k][3];\n u51im1 = tmp * rsd[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \n#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)\n for (i = ist; i <= iend; i += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);\n frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \n#pragma omp parallel for private (i,m) 
firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);\n frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);\n }\n }\n } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2)", "context_chars": 100, "text": "m1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i)\n for (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21i = tmp * rsd[i][j][k][1];\n u31i = tmp * rsd[i][j][k][2];\n u41i = tmp * rsd[i][j][k][3];\n u51i = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i - 1][j][k][0];\n u21im1 = tmp * rsd[i - 1][j][k][1];\n u31im1 = tmp * rsd[i - 1][j][k][2];\n u41im1 = tmp * rsd[i - 1][j][k][3];\n u51im1 = tmp * rsd[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \n#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)\n for (i = ist; i <= iend; i += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order 
dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);\n frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \n#pragma omp parallel for private (i,m) firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);\n frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);\n }\n } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,m)", "context_chars": 100, "text": "p,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n } #pragma omp parallel for private (i,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (tx2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (tx2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i)", "context_chars": 100, "text": "t[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n \nfor (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21i = tmp * rsd[i][j][k][1];\n u31i = tmp * rsd[i][j][k][2];\n u41i = tmp * rsd[i][j][k][3];\n u51i = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i - 1][j][k][0];\n u21im1 = tmp * rsd[i - 1][j][k][1];\n u31im1 = tmp * rsd[i - 1][j][k][2];\n u41im1 = tmp * rsd[i - 1][j][k][3];\n u51im1 = tmp * rsd[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * 
tx3 * (u51i - u51im1);\n } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)", "context_chars": 100, "text": "* (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \nfor (i = ist; i <= iend; i += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);\n } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);\n frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,m) firstprivate (iend1)", "context_chars": 100, "text": " * rsd[3][j][k][m] + rsd[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \nfor (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);\n }\n } #pragma omp parallel for private (i,m) firstprivate (iend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "llel for private (i,m) firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private 
(m) firstprivate (dsspm)", "context_chars": 100, "text": " rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);\n frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dsspm)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2)", "context_chars": 100, "text": "---------------------------------------------------------------*/\n L1 = 0;\n L2 = ny - 1;\n \nfor (i = ist; i <= iend; i += 1) {\n//firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n \n#pragma omp parallel for private (q,u31,j,k)\n for (j = L1; j <= L2; j += 1) {\n \n#pragma omp parallel for private (q,u31,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][2];\n u31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u31;\n flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][3] = rsd[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n }\n } #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,j,k)", "context_chars": 100, "text": " for (i = ist; i <= iend; i += 1) {\n//firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n \nfor (j = L1; j <= L2; j += 1) {\n \n#pragma omp parallel for private (q,u31,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][2];\n u31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u31;\n flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][3] = rsd[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n } #pragma omp parallel for private (q,u31,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,k)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (q,u31,j,k)\n for (j = L1; j <= L2; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][2];\n u31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u31;\n flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][3] = rsd[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;\n } #pragma omp parallel for private (q,u31,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz)", "context_chars": 100, "text": " flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n }\n }\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n \n#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j)\n for (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21j = tmp * rsd[i][j][k][1];\n u31j = tmp * rsd[i][j][k][2];\n u41j = tmp * rsd[i][j][k][3];\n u51j = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j - 1][k][0];\n u21jm1 = tmp * rsd[i][j - 1][k][1];\n u31jm1 = tmp * rsd[i][j - 1][k][2];\n u41jm1 = tmp * rsd[i][j - 1][k][3];\n u51jm1 = tmp * rsd[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n }\n \n#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)\n for (j = jst; j <= jend; j += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);\n frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \n#pragma omp parallel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * 
rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);\n frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);\n }\n }\n } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2)", "context_chars": 100, "text": "d1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz)\n for (i = ist; i <= iend; i += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n \n#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j)\n for (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21j = tmp * rsd[i][j][k][1];\n u31j = tmp * rsd[i][j][k][2];\n u41j = tmp * rsd[i][j][k][3];\n u51j = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j - 1][k][0];\n u21jm1 = tmp * rsd[i][j - 1][k][1];\n u31jm1 = tmp * rsd[i][j - 1][k][2];\n u41jm1 = tmp * rsd[i][j - 1][k][3];\n u51jm1 = tmp * rsd[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n }\n \n#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)\n for (j = jst; j <= jend; j += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][1][k][m] = frct[i][1][k][m] - dsspm * 
(+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);\n frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \n#pragma omp parallel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);\n frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);\n }\n } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "p,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (ty2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n } #pragma omp parallel for private (m) firstprivate (ty2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j)", "context_chars": 100, "text": "t[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n \nfor (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21j = tmp * rsd[i][j][k][1];\n u31j = tmp * rsd[i][j][k][2];\n u41j = tmp * rsd[i][j][k][3];\n u51j = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j - 1][k][0];\n u21jm1 = tmp * rsd[i][j - 1][k][1];\n u31jm1 = tmp * rsd[i][j - 1][k][2];\n u41jm1 = tmp * rsd[i][j - 1][k][3];\n u51jm1 = tmp * rsd[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)", "context_chars": 100, "text": "* (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n }\n \nfor (j = jst; j <= jend; j += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);\n } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);\n frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m) firstprivate (jend1)", "context_chars": 100, "text": " * rsd[i][3][k][m] + rsd[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \nfor (j = jst1; j <= jend1; j += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);\n }\n } #pragma omp parallel for private (j,m) firstprivate (jend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "llel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dsspm)", "context_chars": 100, "text": " rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);\n 
}\n }\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);\n frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dsspm)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend)", "context_chars": 100, "text": "ection flux differences\n--------------------------------------------------------------------*/\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u41,k)\n for (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][3];\n u41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u41;\n flux[i][j][k][2] = rsd[i][j][k][2] * u41;\n flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \n#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k)\n for (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21k = tmp * rsd[i][j][k][1];\n u31k = tmp * rsd[i][j][k][2];\n u41k = tmp * rsd[i][j][k][3];\n u51k = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j][k - 1][0];\n u21km1 = tmp * rsd[i][j][k - 1][1];\n u31km1 = tmp * rsd[i][j][k - 1][2];\n u41km1 = tmp * rsd[i][j][k - 1][3];\n u51km1 = tmp * rsd[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \n#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)\n for (k = 1; k <= nz - 2; k += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);\n 
frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);\n frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);\n frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);\n }\n }\n } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz)", "context_chars": 100, "text": ",q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u41,k)\n for (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][3];\n u41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u41;\n flux[i][j][k][2] = rsd[i][j][k][2] * u41;\n flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \n#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k)\n for (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21k = tmp * rsd[i][j][k][1];\n u31k = tmp * rsd[i][j][k][2];\n u41k = tmp * rsd[i][j][k][3];\n u51k = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j][k - 1][0];\n u21km1 = tmp * rsd[i][j][k - 1][1];\n u31km1 = tmp * rsd[i][j][k - 1][2];\n u41km1 = tmp * rsd[i][j][k - 1][3];\n u51km1 = tmp * rsd[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k 
* u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \n#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)\n for (k = 1; k <= nz - 2; k += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);\n frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dsspm)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);\n frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);\n }\n } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u41,k)", "context_chars": 100, "text": "m1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = rsd[i][j][k][3];\n u41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];\n flux[i][j][k][1] = rsd[i][j][k][1] * u41;\n flux[i][j][k][2] = rsd[i][j][k][2] * u41;\n flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;\n } #pragma omp parallel for private (q,u41,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", 
"omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "\n flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (tz2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n } #pragma omp parallel for private (m) firstprivate (tz2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k)", "context_chars": 100, "text": "t[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \nfor (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / rsd[i][j][k][0];\n u21k = tmp * rsd[i][j][k][1];\n u31k = tmp * rsd[i][j][k][2];\n u41k = tmp * rsd[i][j][k][3];\n u51k = tmp * rsd[i][j][k][4];\n tmp = 1.0 / rsd[i][j][k - 1][0];\n u21km1 = tmp * rsd[i][j][k - 1][1];\n u31km1 = tmp * rsd[i][j][k - 1][2];\n u41km1 = tmp * rsd[i][j][k - 1][3];\n u51km1 = tmp * rsd[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)", "context_chars": 100, "text": "* (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \nfor (k = 1; k <= nz - 2; k += 1) {\n frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);\n frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);\n frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);\n frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);\n frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);\n } #pragma omp parallel for private (k) firstprivate 
(tz1,tz3,dz1,dz2,dz3,dz4,dz5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);\n frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);\n }\n \nfor (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dsspm)", "context_chars": 100, "text": " rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);\n frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);\n } #pragma omp parallel for private (m) firstprivate (dsspm)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " int i;\n int j;\n int k;\n int m;\n int iglob;\n int jglob;\n double tmp;\n double u000ijk[5];\n \nfor (m = 0; m <= 4; m += 1) {\n errnm[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,m)", "context_chars": 100, "text": " jglob = j;\n for (k = 1; k <= nz - 2; k += 1) {\n exact(iglob,jglob,k,u000ijk);\n \nfor (m = 0; m <= 4; m += 1) {\n tmp = u000ijk[m] - u[i][j][k][m];\n errnm[m] = errnm[m] + tmp * tmp;\n } #pragma omp parallel for private (tmp,m)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " = ((double )i) / (nx0 - 1);\n eta = ((double )j) / (ny0 - 1);\n zeta = ((double )k) / (nz - 1);\n \nfor (m = 0; m <= 4; m += 1) {\n u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;\n } #pragma omp parallel for private (m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)", "context_chars": 100, "text": "3 = 4.0 / 3.0;\n c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;\n c34 = 1.00e-01 * 1.00e+00;\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)\n for (j = jst; j <= jend; j += 1) {\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));\n d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);\n d[i][j][3][4] = 0.0;\n d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - 
c1345 * tmp2 * u[i][j][k][4]));\n d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);\n d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);\n d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);\n d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k - 1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n a[i][j][0][0] = -dt * tz1 * dz1;\n a[i][j][0][1] = 0.0;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = -dt * tz2;\n a[i][j][0][4] = 0.0;\n a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);\n a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;\n a[i][j][1][2] = 0.0;\n a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);\n a[i][j][1][4] = 0.0;\n a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);\n a[i][j][2][1] = 0.0;\n a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;\n a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);\n a[i][j][2][4] = 0.0;\n a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);\n a[i][j][3][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * tmp1));\n a[i][j][3][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * tmp1));\n a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;\n a[i][j][3][4] = -dt * tz2 * 0.40e+00;\n a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);\n a[i][j][4][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];\n a[i][j][4][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];\n a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];\n a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * 
dz5;\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j - 1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n b[i][j][0][0] = -dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = -dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);\n b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;\n b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);\n b[i][j][2][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * tmp1));\n b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;\n b[i][j][2][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][3] * tmp1));\n b[i][j][2][4] = -dt * ty2 * 0.40e+00;\n b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);\n b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]);\n b[i][j][4][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];\n b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];\n b[i][j][4][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];\n b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i - 1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n c[i][j][0][0] = -dt * tx1 * dx1;\n c[i][j][0][1] = -dt * tx2;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = 0.0;\n c[i][j][0][4] = 0.0;\n c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);\n c[i][j][1][1] 
= -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;\n c[i][j][1][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * tmp1));\n c[i][j][1][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * tmp1));\n c[i][j][1][4] = -dt * tx2 * 0.40e+00;\n c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);\n c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);\n c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;\n c[i][j][2][3] = 0.0;\n c[i][j][2][4] = 0.0;\n c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);\n c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);\n c[i][j][3][2] = 0.0;\n c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;\n c[i][j][3][4] = 0.0;\n c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]);\n c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];\n c[i][j][4][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];\n c[i][j][4][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];\n c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;\n }\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)", "context_chars": 100, "text": "private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));\n d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * 
tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);\n d[i][j][3][4] = 0.0;\n d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]));\n d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);\n d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);\n d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);\n d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k - 1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n a[i][j][0][0] = -dt * tz1 * dz1;\n a[i][j][0][1] = 0.0;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = -dt * tz2;\n a[i][j][0][4] = 0.0;\n a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);\n a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;\n a[i][j][1][2] = 0.0;\n a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);\n a[i][j][1][4] = 0.0;\n a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);\n a[i][j][2][1] = 0.0;\n a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;\n a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);\n a[i][j][2][4] = 0.0;\n a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);\n a[i][j][3][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * tmp1));\n a[i][j][3][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * tmp1));\n a[i][j][3][3] = -dt * tz2 
* (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;\n a[i][j][3][4] = -dt * tz2 * 0.40e+00;\n a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);\n a[i][j][4][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];\n a[i][j][4][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];\n a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];\n a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j - 1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n b[i][j][0][0] = -dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = -dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);\n b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;\n b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);\n b[i][j][2][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * tmp1));\n b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;\n b[i][j][2][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][3] * tmp1));\n b[i][j][2][4] = -dt * ty2 * 0.40e+00;\n b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);\n b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]);\n b[i][j][4][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 
1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];\n b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];\n b[i][j][4][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];\n b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i - 1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n c[i][j][0][0] = -dt * tx1 * dx1;\n c[i][j][0][1] = -dt * tx2;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = 0.0;\n c[i][j][0][4] = 0.0;\n c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);\n c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;\n c[i][j][1][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * tmp1));\n c[i][j][1][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * tmp1));\n c[i][j][1][4] = -dt * tx2 * 0.40e+00;\n c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);\n c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);\n c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;\n c[i][j][2][3] = 0.0;\n c[i][j][2][4] = 0.0;\n c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);\n c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);\n c[i][j][3][2] = 0.0;\n c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;\n c[i][j][3][4] = 0.0;\n c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]);\n c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];\n c[i][j][4][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];\n c[i][j][4][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];\n c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;\n } #pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate 
(k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)", "context_chars": 100, "text": "private (tmp1,tmp2,tmp3,i,j) firstprivate (ist,jst,jend)\n for (i = iend; i >= ist; i += -1) {\n \nfor (j = jend; j >= jst; j += -1) {\n/*#else\t \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n*/\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));\n d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);\n d[i][j][3][4] = 0.0;\n d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]));\n d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);\n d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);\n d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);\n d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * 
dz5);\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i + 1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n a[i][j][0][0] = -dt * tx1 * dx1;\n a[i][j][0][1] = dt * tx2;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = 0.0;\n a[i][j][0][4] = 0.0;\n a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]);\n a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;\n a[i][j][1][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * tmp1));\n a[i][j][1][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * tmp1));\n a[i][j][1][4] = dt * tx2 * 0.40e+00;\n a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]);\n a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1);\n a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;\n a[i][j][2][3] = 0.0;\n a[i][j][2][4] = 0.0;\n a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]);\n a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1);\n a[i][j][3][2] = 0.0;\n a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;\n a[i][j][3][4] = 0.0;\n a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i + 1][j][k][1] * u[i + 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][2] * u[i + 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][3] * u[i + 1][j][k][3]) - c1345 * tmp2 * u[i + 1][j][k][4]);\n a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1];\n a[i][j][4][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2];\n a[i][j][4][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3];\n a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j + 1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n b[i][j][0][0] = -dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]);\n b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;\n b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1);\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 
0.0;\n b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]);\n b[i][j][2][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * tmp1));\n b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;\n b[i][j][2][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][3] * tmp1));\n b[i][j][2][4] = dt * ty2 * 0.40e+00;\n b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]);\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1);\n b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j + 1][k][1] * u[i][j + 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j + 1][k][2] * u[i][j + 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j + 1][k][3] * u[i][j + 1][k][3]) - c1345 * tmp2 * u[i][j + 1][k][4]);\n b[i][j][4][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1];\n b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2];\n b[i][j][4][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3];\n b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k + 1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n c[i][j][0][0] = -dt * tz1 * dz1;\n c[i][j][0][1] = 0.0;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = dt * tz2;\n c[i][j][0][4] = 0.0;\n c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]);\n c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;\n c[i][j][1][2] = 0.0;\n c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1);\n c[i][j][1][4] = 0.0;\n c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]);\n c[i][j][2][1] = 0.0;\n c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;\n c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1);\n c[i][j][2][4] = 0.0;\n c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]);\n c[i][j][3][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * tmp1));\n c[i][j][3][2] = dt * 
tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * tmp1));\n c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;\n c[i][j][3][4] = dt * tz2 * 0.40e+00;\n c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k + 1][1] * u[i][j][k + 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k + 1][2] * u[i][j][k + 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k + 1][3] * u[i][j][k + 1][3]) - c1345 * tmp2 * u[i][j][k + 1][4]);\n c[i][j][4][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];\n c[i][j][4][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];\n c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];\n c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;\n }\n }\n} #pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "0;\n double sum1 = 0.0;\n double sum2 = 0.0;\n double sum3 = 0.0;\n double sum4 = 0.0;\n \nfor (m = 0; m <= 4; m += 1) {\n sum[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend)", "context_chars": 100, "text": "ragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n sum[m] = 0.0;\n }\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4)\n for (k = 1; k <= nz0 - 2; k += 1) {\n sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n }\n }\n } #pragma omp parallel for private (i,j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4)", "context_chars": 100, "text": "um0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4)\n for (k = 1; k <= nz0 - 2; k += 1) {\n sum0 = sum0 + v[i][j][k][0] * 
v[i][j][k][0];\n sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n }\n } #pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4)", "context_chars": 100, "text": "vate (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz0 - 2; k += 1) {\n sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n } #pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "----------\nc initialize\n--------------------------------------------------------------------*/\n \nfor (i = 0; i <= 65; i += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "-----------------*/\n \n#pragma omp parallel for private (i,k)\n for (i = 0; i <= 65; i += 1) {\n \nfor (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,iglob,jglob,i,j)", "context_chars": 100, "text": "(k)\n for (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n \nfor (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \n#pragma omp parallel for private (k,jglob,j)\n for (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n k = ki1;\n phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);\n k = ki2;\n phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);\n }\n } #pragma omp parallel for private (k,iglob,jglob,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,jglob,j)", "context_chars": 100, "text": " parallel for private (k,iglob,jglob,i,j)\n for (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \nfor (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n k = ki1;\n phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);\n k = ki2;\n phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / 
u[i][j][k][0]);\n } #pragma omp parallel for private (k,jglob,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) reduction (+:frc1)", "context_chars": 100, "text": "k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);\n }\n }\n frc1 = 0.0;\n \nfor (i = ibeg; i <= ifin1; i += 1) {\n \n#pragma omp parallel for private (j) reduction (+:frc1)\n for (j = jbeg; j <= jfin1; j += 1) {\n frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);\n }\n } #pragma omp parallel for private (i,j) reduction (+:frc1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j) reduction (+:frc1)", "context_chars": 100, "text": "ragma omp parallel for private (i,j) reduction (+:frc1)\n for (i = ibeg; i <= ifin1; i += 1) {\n \nfor (j = jbeg; j <= jfin1; j += 1) {\n frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);\n } #pragma omp parallel for private (j) reduction (+:frc1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "----------\nc initialize\n--------------------------------------------------------------------*/\n \nfor (i = 0; i <= 65; i += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "-----------------*/\n \n#pragma omp parallel for private (i,k)\n for (i = 0; i <= 65; i += 1) {\n \nfor (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (iglob,i,k)", "context_chars": 100, "text": " phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n jglob = jbeg;\n if (jglob == ji1) {\n \nfor (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \n#pragma omp parallel for private (k)\n for (k = ki1; k <= ki2; k += 1) {\n phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]);\n }\n } #pragma omp parallel for private (iglob,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "mp parallel for private (iglob,i,k)\n for (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \nfor (k = ki1; k <= ki2; k += 1) {\n phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]);\n } #pragma 
omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (iglob,i,k)", "context_chars": 100, "text": "u[i][jbeg][k][3]) / u[i][jbeg][k][0]);\n }\n }\n }\n jglob = jfin;\n if (jglob == ji2) {\n \nfor (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \n#pragma omp parallel for private (k)\n for (k = ki1; k <= ki2; k += 1) {\n phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]);\n }\n } #pragma omp parallel for private (iglob,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "mp parallel for private (iglob,i,k)\n for (i = ibeg; i <= ifin; i += 1) {\n iglob = i;\n \nfor (k = ki1; k <= ki2; k += 1) {\n phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]);\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,k) reduction (+:frc2) firstprivate (ifin1)", "context_chars": 100, "text": "][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]);\n }\n }\n }\n frc2 = 0.0;\n \nfor (i = ibeg; i <= ifin1; i += 1) {\n \n#pragma omp parallel for private (k) reduction (+:frc2)\n for (k = ki1; k <= ki2 - 1; k += 1) {\n frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);\n }\n } #pragma omp parallel for private (i,k) reduction (+:frc2) firstprivate (ifin1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) reduction (+:frc2)", "context_chars": 100, "text": "r private (i,k) reduction (+:frc2) firstprivate (ifin1)\n for (i = ibeg; i <= ifin1; i += 1) {\n \nfor (k = ki1; k <= ki2 - 1; k += 1) {\n frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);\n } #pragma omp parallel for private (k) reduction (+:frc2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "----------\nc initialize\n--------------------------------------------------------------------*/\n \nfor (i = 0; i <= 65; i += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "-----------------*/\n \n#pragma omp parallel for private (i,k)\n for (i = 0; i <= 65; i += 1) {\n \nfor (k = 0; k <= 65; k += 1) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } 
#pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (jglob,j,k)", "context_chars": 100, "text": " phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n iglob = ibeg;\n if (iglob == ii1) {\n \nfor (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n \n#pragma omp parallel for private (k) firstprivate (ibeg)\n for (k = ki1; k <= ki2; k += 1) {\n phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]);\n }\n } #pragma omp parallel for private (jglob,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (ibeg)", "context_chars": 100, "text": "mp parallel for private (jglob,j,k)\n for (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n \nfor (k = ki1; k <= ki2; k += 1) {\n phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]);\n } #pragma omp parallel for private (k) firstprivate (ibeg)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (jglob,j,k) firstprivate (jfin)", "context_chars": 100, "text": "u[ibeg][j][k][3]) / u[ibeg][j][k][0]);\n }\n }\n }\n iglob = ifin;\n if (iglob == ii2) {\n \nfor (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n \n#pragma omp parallel for private (k) firstprivate (ifin)\n for (k = ki1; k <= ki2; k += 1) {\n phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]);\n }\n } #pragma omp parallel for private (jglob,j,k) firstprivate (jfin)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (ifin)", "context_chars": 100, "text": "ate (jglob,j,k) firstprivate (jfin)\n for (j = jbeg; j <= jfin; j += 1) {\n jglob = j;\n \nfor (k = ki1; k <= ki2; k += 1) {\n phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]);\n } #pragma omp parallel for private (k) firstprivate (ifin)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2)", "context_chars": 100, "text": "][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]);\n }\n }\n }\n frc3 = 0.0;\n \nfor (j = jbeg; j <= jfin1; j += 1) {\n \n#pragma omp parallel for private (k) reduction (+:frc3)\n for (k = ki1; k <= ki2 - 1; k += 1) {\n frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]);\n }\n } #pragma omp parallel for private (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) reduction (+:frc3)", "context_chars": 100, "text": "e (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2)\n for (j = jbeg; j <= jfin1; j += 1) {\n \nfor (k = ki1; k <= ki2 - 1; k += 1) {\n frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]);\n } #pragma omp parallel for private (k) reduction (+:frc3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " double u51jm1;\n double u21km1;\n double u31km1;\n double u41km1;\n double u51km1;\n \nfor (i = 0; i <= nx - 1; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= ny - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = -frct[i][j][k][m];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "1km1;\n \n#pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= nx - 1; i += 1) {\n \nfor (j = 0; j <= ny - 1; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = -frct[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "{\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= ny - 1; j += 1) {\n \nfor (k = 0; k <= nz - 1; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = -frct[i][j][k][m];\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= nz - 1; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = -frct[i][j][k][m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2)", "context_chars": 100, "text": "---------------------------------------------------------------*/\n L1 = 0;\n L2 = nx - 1;\n \nfor (i = L1; i <= L2; i += 1) {\n \n#pragma omp parallel for private (q,u21,j,k)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u21,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][1];\n u21 = u[i][j][k][1] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n 
flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][2] = u[i][j][k][2] * u21;\n flux[i][j][k][3] = u[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n }\n } #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,j,k)", "context_chars": 100, "text": "omp parallel for private (q,u21,i,j,k) firstprivate (L2)\n for (i = L1; i <= L2; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u21,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][1];\n u21 = u[i][j][k][1] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][2] = u[i][j][k][2] * u21;\n flux[i][j][k][3] = u[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n } #pragma omp parallel for private (q,u21,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u21,k)", "context_chars": 100, "text": " \n#pragma omp parallel for private (q,u21,j,k)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][1];\n u21 = u[i][j][k][1] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][2] = u[i][j][k][2] * u21;\n flux[i][j][k][3] = u[i][j][k][3] * u21;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;\n } #pragma omp parallel for private (q,u21,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m)", "context_chars": 100, "text": " flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;\n }\n }\n }\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n L2 = nx - 1;\n \n#pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2)\n for (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21i = tmp * u[i][j][k][1];\n u31i = tmp * u[i][j][k][2];\n u41i = tmp * u[i][j][k][3];\n u51i = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i - 1][j][k][0];\n u21im1 = tmp * u[i - 1][j][k][1];\n u31im1 = tmp * u[i - 1][j][k][2];\n u41im1 = tmp * u[i - 1][j][k][3];\n u51im1 = tmp * u[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n 
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \n#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)\n for (i = ist; i <= iend; i += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]);\n rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \n#pragma omp parallel for private (i,m) firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]);\n rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]);\n }\n }\n } #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx)", "context_chars": 100, "text": "i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n L2 = nx - 1;\n \n#pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2)\n for (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21i = tmp * u[i][j][k][1];\n u31i = tmp * 
u[i][j][k][2];\n u41i = tmp * u[i][j][k][3];\n u51i = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i - 1][j][k][0];\n u21im1 = tmp * u[i - 1][j][k][1];\n u31im1 = tmp * u[i - 1][j][k][2];\n u41im1 = tmp * u[i - 1][j][k][3];\n u51im1 = tmp * u[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \n#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)\n for (i = ist; i <= iend; i += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]);\n }\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]);\n rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \n#pragma omp parallel for private (i,m) firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]);\n rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]);\n }\n } #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,m)", "context_chars": 100, "text": "im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tx2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n 
}\n } #pragma omp parallel for private (i,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (tx2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,m)\n for (i = ist; i <= iend; i += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (tx2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2)", "context_chars": 100, "text": " (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);\n }\n }\n L2 = nx - 1;\n \nfor (i = ist; i <= L2; i += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21i = tmp * u[i][j][k][1];\n u31i = tmp * u[i][j][k][2];\n u41i = tmp * u[i][j][k][3];\n u51i = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i - 1][j][k][0];\n u21im1 = tmp * u[i - 1][j][k][1];\n u31im1 = tmp * u[i - 1][j][k][2];\n u41im1 = tmp * u[i - 1][j][k][3];\n u51im1 = tmp * u[i - 1][j][k][4];\n flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1);\n flux[i][j][k][2] = tx3 * (u31i - u31im1);\n flux[i][j][k][3] = tx3 * (u41i - u41im1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n } #pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)", "context_chars": 100, "text": "* (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);\n }\n \nfor (i = ist; i <= iend; i += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]);\n } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * 
u[2][j][k][m] + u[3][j][k][m]);\n rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,m) firstprivate (iend1)", "context_chars": 100, "text": " 4.0 * u[3][j][k][m] + u[4][j][k][m]);\n }\n ist1 = 3;\n iend1 = nx - 4;\n \nfor (i = ist1; i <= iend1; i += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n } #pragma omp parallel for private (i,m) firstprivate (iend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "llel for private (i,m) firstprivate (iend1)\n for (i = ist1; i <= iend1; i += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": " 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]);\n rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2)", "context_chars": 100, "text": "---------------------------------------------------------------*/\n L1 = 0;\n L2 = ny - 1;\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (q,u31,j,k)\n for (j = L1; j <= L2; j += 1) {\n \n#pragma omp parallel for private (q,u31,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][2];\n u31 = u[i][j][k][2] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u31;\n flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][3] = u[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n }\n } #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,j,k)", "context_chars": 100, "text": "rallel for private (q,u31,i,j,k) firstprivate (L1,L2)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = L1; j <= L2; j += 1) {\n \n#pragma omp 
parallel for private (q,u31,k)\n for (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][2];\n u31 = u[i][j][k][2] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u31;\n flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][3] = u[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n } #pragma omp parallel for private (q,u31,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u31,k)", "context_chars": 100, "text": "\n \n#pragma omp parallel for private (q,u31,j,k)\n for (j = L1; j <= L2; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n flux[i][j][k][0] = u[i][j][k][2];\n u31 = u[i][j][k][2] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u31;\n flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][3] = u[i][j][k][3] * u31;\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;\n } #pragma omp parallel for private (q,u31,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz)", "context_chars": 100, "text": " flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;\n }\n }\n }\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n L2 = ny - 1;\n \n#pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2)\n for (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21j = tmp * u[i][j][k][1];\n u31j = tmp * u[i][j][k][2];\n u41j = tmp * u[i][j][k][3];\n u51j = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j - 1][k][0];\n u21jm1 = tmp * u[i][j - 1][k][1];\n u31jm1 = tmp * u[i][j - 1][k][2];\n u41jm1 = tmp * u[i][j - 1][k][3];\n u51jm1 = tmp * u[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n }\n \n#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)\n for (j = jst; j <= jend; j += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + 
u[i][j + 1][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]);\n rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \n#pragma omp parallel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]);\n rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]);\n }\n }\n } #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny)", "context_chars": 100, "text": "21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz)\n for (i = ist; i <= iend; i += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n L2 = ny - 1;\n \n#pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2)\n for (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21j = tmp * u[i][j][k][1];\n u31j = tmp * u[i][j][k][2];\n u41j = tmp * u[i][j][k][3];\n u51j = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j - 1][k][0];\n u21jm1 = tmp * u[i][j - 1][k][1];\n u31jm1 = tmp * u[i][j - 1][k][2];\n u41jm1 = tmp * u[i][j - 1][k][3];\n u51jm1 = tmp * u[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j 
- u51jm1);\n }\n \n#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)\n for (j = jst; j <= jend; j += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]);\n rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \n#pragma omp parallel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]);\n rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]);\n }\n } #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": "jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (m) firstprivate (ty2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (ty2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n } #pragma omp parallel for private (m) firstprivate (ty2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2)", "context_chars": 100, "text": " (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);\n }\n }\n L2 = ny - 1;\n \nfor (j = jst; j <= L2; j += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21j = tmp * u[i][j][k][1];\n u31j = tmp * u[i][j][k][2];\n u41j = tmp * u[i][j][k][3];\n u51j = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j - 1][k][0];\n u21jm1 = tmp * u[i][j - 1][k][1];\n u31jm1 = tmp * u[i][j - 1][k][2];\n u41jm1 = tmp * u[i][j - 1][k][3];\n u51jm1 = tmp * u[i][j - 1][k][4];\n flux[i][j][k][1] = ty3 * (u21j - u21jm1);\n flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1);\n flux[i][j][k][3] = ty3 * (u41j - u41jm1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n } #pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)", "context_chars": 100, "text": "* (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);\n }\n \nfor (j = jst; j <= jend; j += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]);\n } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]);\n rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,m) firstprivate (jend1)", "context_chars": 100, "text": " 4.0 * u[i][3][k][m] + u[i][4][k][m]);\n }\n jst1 = 3;\n jend1 = ny - 4;\n \nfor (j = jst1; j <= jend1; j += 1) {\n 
\n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n } #pragma omp parallel for private (j,m) firstprivate (jend1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "llel for private (j,m) firstprivate (jend1)\n for (j = jst1; j <= jend1; j += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": " 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]);\n rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend)", "context_chars": 100, "text": "ection flux differences\n--------------------------------------------------------------------*/\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u41,k)\n for (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = u[i][j][k][3];\n u41 = u[i][j][k][3] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u41;\n flux[i][j][k][2] = u[i][j][k][2] * u41;\n flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \n#pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k)\n for (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21k = tmp * u[i][j][k][1];\n u31k = tmp * u[i][j][k][2];\n u41k = tmp * u[i][j][k][3];\n u51k = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j][k - 1][0];\n u21km1 = tmp * u[i][j][k - 1][1];\n u31km1 = tmp * u[i][j][k - 1][2];\n u41km1 = tmp * u[i][j][k - 1][3];\n u51km1 = tmp * u[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n 
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \n#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)\n for (k = 1; k <= nz - 2; k += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]);\n rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]);\n rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]);\n }\n }\n } #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz)", "context_chars": 100, "text": "1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (q,u41,k)\n for (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = u[i][j][k][3];\n u41 = u[i][j][k][3] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u41;\n flux[i][j][k][2] = u[i][j][k][2] * u41;\n flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \n#pragma omp parallel for 
private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \n#pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k)\n for (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21k = tmp * u[i][j][k][1];\n u31k = tmp * u[i][j][k][2];\n u41k = tmp * u[i][j][k][3];\n u51k = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j][k - 1][0];\n u21km1 = tmp * u[i][j][k - 1][1];\n u31km1 = tmp * u[i][j][k - 1][2];\n u41km1 = tmp * u[i][j][k - 1][3];\n u51km1 = tmp * u[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \n#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)\n for (k = 1; k <= nz - 2; k += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]);\n }\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]);\n rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);\n }\n \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n \n#pragma omp parallel for private (m) firstprivate (dssp)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]);\n rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]);\n }\n } #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": 
"#pragma omp parallel for private (q,u41,k)", "context_chars": 100, "text": "km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 0; k <= nz - 1; k += 1) {\n flux[i][j][k][0] = u[i][j][k][3];\n u41 = u[i][j][k][3] / u[i][j][k][0];\n q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];\n flux[i][j][k][1] = u[i][j][k][1] * u41;\n flux[i][j][k][2] = u[i][j][k][2] * u41;\n flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q);\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;\n } #pragma omp parallel for private (q,u41,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": ");\n flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;\n }\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tz2)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (tz2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n } #pragma omp parallel for private (m) firstprivate (tz2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k)", "context_chars": 100, "text": "d[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);\n }\n }\n \nfor (k = 1; k <= nz - 1; k += 1) {\n tmp = 1.0 / u[i][j][k][0];\n u21k = tmp * u[i][j][k][1];\n u31k = tmp * u[i][j][k][2];\n u41k = tmp * u[i][j][k][3];\n u51k = tmp * u[i][j][k][4];\n tmp = 1.0 / u[i][j][k - 1][0];\n u21km1 = tmp * u[i][j][k - 1][1];\n u31km1 = tmp * u[i][j][k - 1][2];\n u41km1 = tmp * u[i][j][k - 1][3];\n u51km1 = tmp * u[i][j][k - 1][4];\n flux[i][j][k][1] = tz3 * (u21k - u21km1);\n flux[i][j][k][2] = tz3 * (u31k - u31km1);\n flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1);\n flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n } #pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)", "context_chars": 100, "text": "* (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);\n }\n \nfor (k = 1; k <= nz - 2; k += 1) {\n rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]);\n rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * 
(flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]);\n rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]);\n rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]);\n rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]);\n } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "h-order dissipation\n--------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]);\n rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": ".0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);\n }\n \nfor (k = 3; k <= nz - 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 3; k <= nz - 4; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dssp)", "context_chars": 100, "text": " 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]);\n rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]);\n } #pragma omp parallel for private (m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (pxi,peta,pzeta,m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " 
exact(iglob,jglob,0,ue_ij1);\n exact(iglob,jglob,nz - 1,ue_ijnz);\n \nfor (m = 0; m <= 4; m += 1) {\n pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];\n peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];\n pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];\n u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;\n } #pragma omp parallel for private (pxi,peta,pzeta,m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "before timestepping).\n--------------------------------------------------------------------*/\n{\n \nfor (i = 0; i <= 63; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n a[i][j][k][m] = 0.0;\n b[i][j][k][m] = 0.0;\n c[i][j][k][m] = 0.0;\n d[i][j][k][m] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "-----*/\n{\n \n#pragma omp parallel for private (i,j,k,m)\n for (i = 0; i <= 63; i += 1) {\n \nfor (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n a[i][j][k][m] = 0.0;\n b[i][j][k][m] = 0.0;\n c[i][j][k][m] = 0.0;\n d[i][j][k][m] = 0.0;\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": " 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = 0; j <= 63; j += 1) {\n \nfor (k = 0; k <= 4; k += 1) {\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n a[i][j][k][m] = 0.0;\n b[i][j][k][m] = 0.0;\n c[i][j][k][m] = 0.0;\n d[i][j][k][m] = 0.0;\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " {\n \n#pragma omp parallel for private (k,m)\n for (k = 0; k <= 4; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n a[i][j][k][m] = 0.0;\n b[i][j][k][m] = 0.0;\n c[i][j][k][m] = 0.0;\n d[i][j][k][m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "erform SSOR iteration\n--------------------------------------------------------------------*/\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (j,k,m)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dt)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n }\n }\n }\n } #pragma omp 
parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j,k,m)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dt)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k,m)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (dt)\n for (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n }\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dt)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n } #pragma omp parallel for private (m) firstprivate (dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " update the variables\n--------------------------------------------------------------------*/\n \nfor (i = ist; i <= iend; i += 1) {\n \n#pragma omp parallel for private (j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tmp)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m) firstprivate (nz)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j,k,m)\n for (i = ist; i <= iend; i += 1) {\n \nfor (j = jst; j <= jend; j += 1) {\n \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tmp)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];\n }\n }\n } #pragma omp parallel for private (j,k,m) firstprivate (nz)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": "arallel for private (j,k,m) firstprivate (nz)\n for (j = jst; j <= jend; j += 1) {\n \nfor (k = 1; k <= nz - 2; k += 1) {\n \n#pragma omp parallel for private (m) firstprivate (tmp)\n for (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];\n 
}\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (tmp)", "context_chars": 100, "text": " \n#pragma omp parallel for private (k,m)\n for (k = 1; k <= nz - 2; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];\n } #pragma omp parallel for private (m) firstprivate (tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "--------------------------------------*/\n epsilon = 1.0e-08;\n *class = 'U';\n *verified = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " 1006 format(' WARNING: compiled for ', i5, ' processes. ',\n > ' Will not verify. ')*/\n \nfor (i = 0; i <= 2; i += 1) {\n dims[i][0] = 256;\n dims[i][1] = 256;\n dims[i][2] = 128;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " (i = 0; i <= 2; i += 1) {\n dims[i][0] = 256;\n dims[i][1] = 256;\n dims[i][2] = 128;\n }\n \nfor (i = 0; i <= 2; i += 1) {\n xstart[i] = 1;\n xend[i] = 256;\n ystart[i] = 1;\n yend[i] = 256;\n zstart[i] = 1;\n zend[i] = 128;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (ii,ii2,jj,ij2,kk,i,j,k)", "context_chars": 100, "text": ":\nc mod(i-1+n/2, n) - n/2\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= dims[2][0] - 1; i += 1) {\n ii = (i + 1 + xstart[2] - 2 + 256 / 2) % 256 - 256 / 2;\n ii2 = ii * ii;\n \n#pragma omp parallel for private (jj,ij2,kk,j,k) firstprivate (ii2)\n for (j = 0; j <= dims[2][1] - 1; j += 1) {\n jj = (j + 1 + ystart[2] - 2 + 256 / 2) % 256 - 256 / 2;\n ij2 = jj * jj + ii2;\n \n#pragma omp parallel for private (kk,k) firstprivate (ij2)\n for (k = 0; k <= dims[2][2] - 1; k += 1) {\n kk = (k + 1 + zstart[2] - 2 + 128 / 2) % 128 - 128 / 2;\n indexmap[k][j][i] = kk * kk + ij2;\n }\n }\n } #pragma omp parallel for private (ii,ii2,jj,ij2,kk,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (jj,ij2,kk,j,k) firstprivate (ii2)", "context_chars": 100, "text": " - 1; i += 1) {\n ii = (i + 1 + xstart[2] - 2 + 256 / 2) % 256 - 256 / 2;\n ii2 = ii * ii;\n \nfor (j = 0; j <= dims[2][1] - 1; j += 1) {\n jj = (j + 1 + ystart[2] - 2 + 256 / 2) % 256 - 256 / 2;\n ij2 = jj * jj + ii2;\n \n#pragma omp parallel for private (kk,k) firstprivate (ij2)\n for (k = 0; k <= dims[2][2] - 1; k += 1) {\n kk = (k + 1 + zstart[2] - 2 + 128 / 2) % 128 - 128 / 2;\n indexmap[k][j][i] = kk * kk + ij2;\n }\n } #pragma omp parallel for private 
(jj,ij2,kk,j,k) firstprivate (ii2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (kk,k) firstprivate (ij2)", "context_chars": 100, "text": ") {\n jj = (j + 1 + ystart[2] - 2 + 256 / 2) % 256 - 256 / 2;\n ij2 = jj * jj + ii2;\n \nfor (k = 0; k <= dims[2][2] - 1; k += 1) {\n kk = (k + 1 + zstart[2] - 2 + 128 / 2) % 128 - 128 / 2;\n indexmap[k][j][i] = kk * kk + ij2;\n } #pragma omp parallel for private (kk,k) firstprivate (ij2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "lock; jj += fftblock) {\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (j = 0; j <= fftblock - 1; j += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= d[0] - 1; i += 1) {\n y0[i][j] . real = x[k][j + jj][i] . real;\n y0[i][j] . imag = x[k][j + jj][i] . imag;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j)\n for (j = 0; j <= fftblock - 1; j += 1) {\n \nfor (i = 0; i <= d[0] - 1; i += 1) {\n y0[i][j] . real = x[k][j + jj][i] . real;\n y0[i][j] . imag = x[k][j + jj][i] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "imer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (j = 0; j <= fftblock - 1; j += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= d[0] - 1; i += 1) {\n xout[k][j + jj][i] . real = y0[i][j] . real;\n xout[k][j + jj][i] . imag = y0[i][j] . imag;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j)\n for (j = 0; j <= fftblock - 1; j += 1) {\n \nfor (i = 0; i <= d[0] - 1; i += 1) {\n xout[k][j + jj][i] . real = y0[i][j] . real;\n xout[k][j + jj][i] . imag = y0[i][j] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " fftblock; ii += fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (j = 0; j <= d[1] - 1; j += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= fftblock - 1; i += 1) {\n y0[j][i] . real = x[k][j][i + ii] . real;\n y0[j][i] . imag = x[k][j][i + ii] . 
imag;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j)\n for (j = 0; j <= d[1] - 1; j += 1) {\n \nfor (i = 0; i <= fftblock - 1; i += 1) {\n y0[j][i] . real = x[k][j][i + ii] . real;\n y0[j][i] . imag = x[k][j][i + ii] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "imer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (j = 0; j <= d[1] - 1; j += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= fftblock - 1; i += 1) {\n xout[k][j][i + ii] . real = y0[j][i] . real;\n xout[k][j][i + ii] . imag = y0[j][i] . imag;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,j)\n for (j = 0; j <= d[1] - 1; j += 1) {\n \nfor (i = 0; i <= fftblock - 1; i += 1) {\n xout[k][j][i + ii] . real = y0[j][i] . real;\n xout[k][j][i + ii] . imag = y0[j][i] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": " fftblock; ii += fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (k = 0; k <= d[2] - 1; k += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= fftblock - 1; i += 1) {\n y0[k][i] . real = x[k][j][i + ii] . real;\n y0[k][i] . imag = x[k][j][i + ii] . imag;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,k)\n for (k = 0; k <= d[2] - 1; k += 1) {\n \nfor (i = 0; i <= fftblock - 1; i += 1) {\n y0[k][i] . real = x[k][j][i + ii] . real;\n y0[k][i] . imag = x[k][j][i + ii] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "mer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n \nfor (k = 0; k <= d[2] - 1; k += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= fftblock - 1; i += 1) {\n xout[k][j][i + ii] . real = y0[k][i] . real;\n xout[k][j][i + ii] . imag = y0[k][i] . 
imag;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " \n#pragma omp parallel for private (i,k)\n for (k = 0; k <= d[2] - 1; k += 1) {\n \nfor (i = 0; i <= fftblock - 1; i += 1) {\n xout[k][j][i + ii] . real = y0[k][i] . real;\n xout[k][j][i + ii] . imag = y0[k][i] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i,j) firstprivate (fftblock,n)", "context_chars": 100, "text": " X.\nc-------------------------------------------------------------------*/\n if (m % 2 == 1) {\n \nfor (j = 0; j <= n - 1; j += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 0; i <= fftblock - 1; i += 1) {\n x[j][i] . real = y[j][i] . real;\n x[j][i] . imag = y[j][i] . imag;\n }\n } #pragma omp parallel for private (i,j) firstprivate (fftblock,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "p parallel for private (i,j) firstprivate (fftblock,n)\n for (j = 0; j <= n - 1; j += 1) {\n \nfor (i = 0; i <= fftblock - 1; i += 1) {\n x[j][i] . real = y[j][i] . real;\n x[j][i] . imag = y[j][i] . imag;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (i11,i12,i21,i22)", "context_chars": 100, "text": "---------------------------------------------------*/\n for (k = 0; k <= lk - 1; k += 1) {\n \nfor (j = 0; j <= ny - 1; j += 1) {\n double x11real;\n double x11imag;\n double x21real;\n double x21imag;\n x11real = x[i11 + k][j] . real;\n x11imag = x[i11 + k][j] . imag;\n x21real = x[i12 + k][j] . real;\n x21imag = x[i12 + k][j] . imag;\n y[i21 + k][j] . real = x11real + x21real;\n y[i21 + k][j] . imag = x11imag + x21imag;\n y[i22 + k][j] . real = u1 . real * (x11real - x21real) - u1 . imag * (x11imag - x21imag);\n y[i22 + k][j] . imag = u1 . real * (x11imag - x21imag) + u1 . 
imag * (x11real - x21real);\n } #pragma omp parallel for private (j) firstprivate (i11,i12,i21,i22)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "of update to the vector u\nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "--------------*/\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": ";\n int m;\n int d;\n double xi;\n double eta;\n double zeta;\n double u_exact[5];\n double add;\n \nfor (m = 0; m <= 4; m += 1) {\n rms[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (add,m)", "context_chars": 100, "text": " += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,u_exact);\n \nfor (m = 0; m <= 4; m += 1) {\n add = u[m][i][j][k] - u_exact[m];\n rms[m] = rms[m] + add * add;\n } #pragma omp parallel for private (add,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma 
omp parallel for private (m)", "context_chars": 100, "text": "-----------------------------------*/\n int i;\n int j;\n int k;\n int d;\n int m;\n double add;\n \nfor (m = 0; m <= 4; m += 1) {\n rms[m] = 0.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "dd;\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 4; m += 1) {\n rms[m] = 0.0;\n }\n//for (i = 0; i <= grid_points[0] - 2; i += 1) {\n//#pragma omp parallel for \n for (j = 0; j <= grid_points[1] - 2; j += 1) {\n//#pragma omp parallel for \n for (k = 0; k <= grid_points[2] - 2; k += 1) {\n \n#pragma omp parallel for private (add,m)\n for (m = 0; m <= 4; m += 1) {\n add = rhs[m][i][j][k];\n rms[m] = rms[m] + add * add;\n }\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " rms[m] = 0.0;\n }\n//#pragma omp parallel for \n for (i = 0; i <= grid_points[0] - 2; i += 1) {\n//for (j = 0; j <= grid_points[1] - 2; j += 1) {\n//#pragma omp parallel for \n for (k = 0; k <= grid_points[2] - 2; k += 1) {\n \n#pragma omp parallel for private (add,m)\n for (m = 0; m <= 4; m += 1) {\n add = rhs[m][i][j][k];\n rms[m] = rms[m] + add * add;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "0] - 2; i += 1) {\n//#pragma omp parallel for \n for (j = 0; j <= grid_points[1] - 2; j += 1) {\n//for (k = 0; k <= grid_points[2] - 2; k += 1) {\n \n#pragma omp parallel for private (add,m)\n for (m = 0; m <= 4; m += 1) {\n add = rhs[m][i][j][k];\n rms[m] = rms[m] + add * add;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (add,m)", "context_chars": 100, "text": "j += 1) {\n//#pragma omp parallel for \n for (k = 0; k <= grid_points[2] - 2; k += 1) {\n \nfor (m = 0; m <= 4; m += 1) {\n add = rhs[m][i][j][k];\n rms[m] = rms[m] + add * add;\n } #pragma omp parallel for private (add,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m,i,j,k)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n forcing[m][i][j][k] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (m,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "--------------*/\n \n#pragma omp parallel for private (m,i,j,k)\n for (m = 0; m <= 4; m += 
1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n forcing[m][i][j][k] = 0.0;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n forcing[m][i][j][k] = 0.0;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n forcing[m][i][j][k] = 0.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "1; i += 1) {\n xi = ((double )i) * dnxm1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[m][i] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dtpp)", "context_chars": 100, "text": " m <= 4; m += 1) {\n ue[m][i] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[m][i] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (ip1,im1,i) firstprivate (tx2,xxcon1,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2)", "context_chars": 100, "text": " q[i] = 0.5 * (buf[1][i] * ue[1][i] + buf[2][i] * ue[2][i] + buf[3][i] * ue[3][i]);\n }\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n im1 = i - 1;\n ip1 = i + 1;\n forcing[0][i][j][k] = forcing[0][i][j][k] - tx2 * (ue[1][ip1] - ue[1][im1]) + dx1tx1 * (ue[0][ip1] - 2.0 * ue[0][i] + ue[0][im1]);\n forcing[1][i][j][k] = forcing[1][i][j][k] - tx2 * (ue[1][ip1] * buf[1][ip1] + c2 * (ue[4][ip1] - q[ip1]) - (ue[1][im1] * buf[1][im1] + c2 * (ue[4][im1] - q[im1]))) + xxcon1 * (buf[1][ip1] - 2.0 * buf[1][i] + buf[1][im1]) + dx2tx1 * (ue[1][ip1] - 2.0 * ue[1][i] + ue[1][im1]);\n forcing[2][i][j][k] = forcing[2][i][j][k] - tx2 * (ue[2][ip1] * buf[1][ip1] - ue[2][im1] * buf[1][im1]) + xxcon2 * (buf[2][ip1] - 2.0 * buf[2][i] + buf[2][im1]) + dx3tx1 * (ue[2][ip1] - 2.0 * ue[2][i] + ue[2][im1]);\n forcing[3][i][j][k] = forcing[3][i][j][k] - tx2 * (ue[3][ip1] * buf[1][ip1] - ue[3][im1] * buf[1][im1]) + xxcon2 * (buf[3][ip1] - 2.0 * buf[3][i] + buf[3][im1]) + dx4tx1 * (ue[3][ip1] - 2.0 * ue[3][i] + ue[3][im1]);\n forcing[4][i][j][k] = forcing[4][i][j][k] - tx2 * (buf[1][ip1] * (c1 * 
ue[4][ip1] - c2 * q[ip1]) - buf[1][im1] * (c1 * ue[4][im1] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[0][ip1] - 2.0 * buf[0][i] + buf[0][im1]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[4][ip1] - 2.0 * buf[4][i] + buf[4][im1]) + dx5tx1 * (ue[4][ip1] - 2.0 * ue[4][i] + ue[4][im1]);\n } #pragma omp parallel for private (ip1,im1,i) firstprivate (tx2,xxcon1,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n i = 1;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n i = 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (- 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n } #pragma omp parallel for private (i,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m,i)", "context_chars": 100, "text": " dssp * (- 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i)\n for (i = 3; i <= grid_points[0] - 4; i += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n }\n } #pragma omp parallel for private (m,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n }\n \n#pragma omp parallel for private (m,i)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 3; i <= grid_points[0] - 4; i += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,m) firstprivate (dssp)", "context_chars": 100, "text": " 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n i = grid_points[0] - 3;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1]);\n i = grid_points[0] - 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 5.0 * ue[m][i]);\n } #pragma omp parallel for private (i,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[m][j] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) 
firstprivate (dtpp)", "context_chars": 100, "text": " m <= 4; m += 1) {\n ue[m][j] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[m][j] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (jp1,jm1,j) firstprivate (ty2,yycon1,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2)", "context_chars": 100, "text": " q[j] = 0.5 * (buf[1][j] * ue[1][j] + buf[2][j] * ue[2][j] + buf[3][j] * ue[3][j]);\n }\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n jm1 = j - 1;\n jp1 = j + 1;\n forcing[0][i][j][k] = forcing[0][i][j][k] - ty2 * (ue[2][jp1] - ue[2][jm1]) + dy1ty1 * (ue[0][jp1] - 2.0 * ue[0][j] + ue[0][jm1]);\n forcing[1][i][j][k] = forcing[1][i][j][k] - ty2 * (ue[1][jp1] * buf[2][jp1] - ue[1][jm1] * buf[2][jm1]) + yycon2 * (buf[1][jp1] - 2.0 * buf[1][j] + buf[1][jm1]) + dy2ty1 * (ue[1][jp1] - 2.0 * ue[1][j] + ue[1][jm1]);\n forcing[2][i][j][k] = forcing[2][i][j][k] - ty2 * (ue[2][jp1] * buf[2][jp1] + c2 * (ue[4][jp1] - q[jp1]) - (ue[2][jm1] * buf[2][jm1] + c2 * (ue[4][jm1] - q[jm1]))) + yycon1 * (buf[2][jp1] - 2.0 * buf[2][j] + buf[2][jm1]) + dy3ty1 * (ue[2][jp1] - 2.0 * ue[2][j] + ue[2][jm1]);\n forcing[3][i][j][k] = forcing[3][i][j][k] - ty2 * (ue[3][jp1] * buf[2][jp1] - ue[3][jm1] * buf[2][jm1]) + yycon2 * (buf[3][jp1] - 2.0 * buf[3][j] + buf[3][jm1]) + dy4ty1 * (ue[3][jp1] - 2.0 * ue[3][j] + ue[3][jm1]);\n forcing[4][i][j][k] = forcing[4][i][j][k] - ty2 * (buf[2][jp1] * (c1 * ue[4][jp1] - c2 * q[jp1]) - buf[2][jm1] * (c1 * ue[4][jm1] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[0][jp1] - 2.0 * buf[0][j] + buf[0][jm1]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[4][jp1] - 2.0 * buf[4][j] + buf[4][jm1]) + dy5ty1 * (ue[4][jp1] - 2.0 * ue[4][j] + ue[4][jm1]);\n } #pragma omp parallel for private (jp1,jm1,j) firstprivate (ty2,yycon1,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n j = 1;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n j = 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (- 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n } #pragma omp parallel for private (j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m,j)", "context_chars": 100, "text": " dssp * (- 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j)\n for (j = 3; j <= grid_points[1] - 4; j += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n }\n } #pragma omp parallel for private (m,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for 
private (j)", "context_chars": 100, "text": "\n }\n \n#pragma omp parallel for private (m,j)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 3; j <= grid_points[1] - 4; j += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,m) firstprivate (dssp)", "context_chars": 100, "text": " 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n j = grid_points[1] - 3;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1]);\n j = grid_points[1] - 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 5.0 * ue[m][j]);\n } #pragma omp parallel for private (j,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,dtemp);\n \nfor (m = 0; m <= 4; m += 1) {\n ue[m][k] = dtemp[m];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (dtpp)", "context_chars": 100, "text": " m <= 4; m += 1) {\n ue[m][k] = dtemp[m];\n }\n dtpp = 1.0 / dtemp[0];\n \nfor (m = 1; m <= 4; m += 1) {\n buf[m][k] = dtpp * dtemp[m];\n } #pragma omp parallel for private (m) firstprivate (dtpp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (km1,kp1,k) firstprivate (tz2,zzcon1,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2)", "context_chars": 100, "text": " q[k] = 0.5 * (buf[1][k] * ue[1][k] + buf[2][k] * ue[2][k] + buf[3][k] * ue[3][k]);\n }\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n km1 = k - 1;\n kp1 = k + 1;\n forcing[0][i][j][k] = forcing[0][i][j][k] - tz2 * (ue[3][kp1] - ue[3][km1]) + dz1tz1 * (ue[0][kp1] - 2.0 * ue[0][k] + ue[0][km1]);\n forcing[1][i][j][k] = forcing[1][i][j][k] - tz2 * (ue[1][kp1] * buf[3][kp1] - ue[1][km1] * buf[3][km1]) + zzcon2 * (buf[1][kp1] - 2.0 * buf[1][k] + buf[1][km1]) + dz2tz1 * (ue[1][kp1] - 2.0 * ue[1][k] + ue[1][km1]);\n forcing[2][i][j][k] = forcing[2][i][j][k] - tz2 * (ue[2][kp1] * buf[3][kp1] - ue[2][km1] * buf[3][km1]) + zzcon2 * (buf[2][kp1] - 2.0 * buf[2][k] + buf[2][km1]) + dz3tz1 * (ue[2][kp1] - 2.0 * ue[2][k] + ue[2][km1]);\n forcing[3][i][j][k] = forcing[3][i][j][k] - tz2 * (ue[3][kp1] * buf[3][kp1] + c2 * (ue[4][kp1] - q[kp1]) - (ue[3][km1] * buf[3][km1] + c2 * (ue[4][km1] - q[km1]))) + zzcon1 * (buf[3][kp1] - 2.0 * buf[3][k] + buf[3][km1]) + dz4tz1 * (ue[3][kp1] - 2.0 * ue[3][k] + ue[3][km1]);\n forcing[4][i][j][k] = forcing[4][i][j][k] - tz2 * (buf[3][kp1] * (c1 * ue[4][kp1] - c2 * q[kp1]) - buf[3][km1] * (c1 * ue[4][km1] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[0][kp1] - 2.0 * buf[0][k] + buf[0][km1]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[4][kp1] - 2.0 * 
buf[4][k] + buf[4][km1]) + dz5tz1 * (ue[4][kp1] - 2.0 * ue[4][k] + ue[4][km1]);\n } #pragma omp parallel for private (km1,kp1,k) firstprivate (tz2,zzcon1,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n k = 1;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n k = 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (- 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n } #pragma omp parallel for private (k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m,k)", "context_chars": 100, "text": " dssp * (- 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 3; k <= grid_points[2] - 4; k += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n }\n } #pragma omp parallel for private (m,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "\n }\n \n#pragma omp parallel for private (m,k)\n for (m = 0; m <= 4; m += 1) {\n \nfor (k = 3; k <= grid_points[2] - 4; k += 1) {\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k,m) firstprivate (dssp)", "context_chars": 100, "text": " 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]);\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n k = grid_points[2] - 3;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1]);\n k = grid_points[2] - 2;\n forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 5.0 * ue[m][k]);\n } #pragma omp parallel for private (k,m) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m,i,j,k)", "context_chars": 100, "text": "of the forcing function, \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n forcing[m][i][j][k] = - 1.0 * forcing[m][i][j][k];\n }\n }\n }\n } #pragma omp parallel for private (m,i,j,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "--------------*/\n \n#pragma omp parallel for private (m,i,j,k)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n forcing[m][i][j][k] = - 1.0 * forcing[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n forcing[m][i][j][k] = - 1.0 * forcing[m][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n forcing[m][i][j][k] = - 1.0 * forcing[m][i][j][k];\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " xi, eta, zeta \nc-------------------------------------------------------------------*/\n int m;\n \nfor (m = 0; m <= 4; m += 1) {\n dtemp[m] = ce[0][m] + xi * (ce[1][m] + xi * (ce[4][m] + xi * (ce[7][m] + xi * ce[10][m]))) + eta * (ce[2][m] + eta * (ce[5][m] + eta * (ce[8][m] + eta * ce[11][m]))) + zeta * (ce[3][m] + zeta * (ce[6][m] + zeta * (ce[9][m] + zeta * ce[12][m])));\n } #pragma omp parallel for private (m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "ng the whole thing here. 
\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= 63; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= 63; k += 1) {\n u[0][i][j][k] = 1.0;\n u[1][i][j][k] = 0.0;\n u[2][i][j][k] = 0.0;\n u[3][i][j][k] = 0.0;\n u[4][i][j][k] = 1.0;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "---------------*/\n \n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= 63; i += 1) {\n \nfor (j = 0; j <= 63; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= 63; k += 1) {\n u[0][i][j][k] = 1.0;\n u[1][i][j][k] = 0.0;\n u[2][i][j][k] = 0.0;\n u[3][i][j][k] = 0.0;\n u[4][i][j][k] = 1.0;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "63; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= 63; j += 1) {\n \nfor (k = 0; k <= 63; k += 1) {\n u[0][i][j][k] = 1.0;\n u[1][i][j][k] = 0.0;\n u[2][i][j][k] = 0.0;\n u[3][i][j][k] = 0.0;\n u[4][i][j][k] = 1.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (Pxi,Peta,Pzeta,m) firstprivate (xi,eta,zeta)", "context_chars": 100, "text": " <= 1; iz += 1) {\n exact_solution(xi,eta,(double )iz,&Pface[iz][2][0]);\n }\n \nfor (m = 0; m <= 4; m += 1) {\n Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];\n Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];\n Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];\n u[m][i][j][k] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;\n } #pragma omp parallel for private (Pxi,Peta,Pzeta,m) firstprivate (xi,eta,zeta)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (i)", "context_chars": 100, "text": "2] - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (i)", "context_chars": 100, "text": "2] - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (j)", "context_chars": 100, "text": "2] - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = 
temp[m];\n } #pragma omp parallel for private (m) firstprivate (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (j)", "context_chars": 100, "text": "2] - 1; k += 1) {\n zeta = ((double )k) * dnzm1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k)", "context_chars": 100, "text": "[1] - 1; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k)", "context_chars": 100, "text": "[1] - 1; j += 1) {\n eta = ((double )j) * dnym1;\n exact_solution(xi,eta,zeta,temp);\n \nfor (m = 0; m <= 4; m += 1) {\n u[m][i][j][k] = temp[m];\n } #pragma omp parallel for private (m) firstprivate (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,n)", "context_chars": 100, "text": "ft hand side for starters\nc-------------------------------------------------------------------*/\n \nfor (n = 0; n <= 14; n += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[n][i][j][k] = 0.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "-------------*/\n \n#pragma omp parallel for private (i,j,k,n)\n for (n = 0; n <= 14; n += 1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[n][i][j][k] = 0.0;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[n][i][j][k] = 0.0;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "pragma omp parallel for private 
(j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[n][i][j][k] = 0.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,n)", "context_chars": 100, "text": "l, but \nc convenient\nc-------------------------------------------------------------------*/\n \nfor (n = 0; n <= 2; n += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[5 * n + 2][i][j][k] = 1.0;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,n)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "--------------*/\n \n#pragma omp parallel for private (i,j,k,n)\n for (n = 0; n <= 2; n += 1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[5 * n + 2][i][j][k] = 1.0;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "\n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[5 * n + 2][i][j][k] = 1.0;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n lhs[5 * n + 2][i][j][k] = 1.0;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (ru1,i) firstprivate (dx1,dx2,dx5,dxmax,c1c5,c3c4,con43)", "context_chars": 100, "text": "j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n ru1 = c3c4 * rho_i[i][j][k];\n cv[i] = us[i][j][k];\n rhon[i] = (dx2 + con43 * ru1 > ((dx5 + c1c5 * ru1 > ((dxmax + ru1 > dx1?dxmax + ru1 : dx1))?dx5 + c1c5 * ru1 : ((dxmax + ru1 > dx1?dxmax + ru1 : dx1))))?dx2 + con43 * ru1 : ((dx5 + c1c5 * ru1 > ((dxmax + ru1 > dx1?dxmax + ru1 : dx1))?dx5 + c1c5 * ru1 : ((dxmax + ru1 > dx1?dxmax + ru1 : dx1)))));\n } #pragma omp parallel for private (ru1,i) firstprivate (dx1,dx2,dx5,dxmax,c1c5,c3c4,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp 
parallel for private (i) firstprivate (dttx1,dttx2,c2dttx1)", "context_chars": 100, "text": "1?dxmax + ru1 : dx1))?dx5 + c1c5 * ru1 : ((dxmax + ru1 > dx1?dxmax + ru1 : dx1)))));\n }\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n lhs[0][i][j][k] = 0.0;\n lhs[1][i][j][k] = -dttx2 * cv[i - 1] - dttx1 * rhon[i - 1];\n lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];\n lhs[3][i][j][k] = dttx2 * cv[i + 1] - dttx1 * rhon[i + 1];\n lhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for private (i) firstprivate (dttx1,dttx2,c2dttx1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n i = 1;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4;\n lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz6;\n lhs[3][i + 1][j][k] = lhs[3][i + 1][j][k] - comz4;\n lhs[4][i + 1][j][k] = lhs[4][i + 1][j][k] + comz1;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)", "context_chars": 100, "text": " 1;\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4;\n lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz6;\n lhs[3][i + 1][j][k] = lhs[3][i + 1][j][k] - comz4;\n lhs[4][i + 1][j][k] = lhs[4][i + 1][j][k] + comz1;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " lhs[3][i + 1][j][k] - comz4;\n lhs[4][i + 1][j][k] = lhs[4][i + 1][j][k] + comz1;\n }\n }\n \nfor (i = 3; i <= grid_points[0] - 4; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "}\n \n#pragma omp parallel for private (i,j,k)\n for (i = 3; i <= grid_points[0] - 4; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate 
(comz1,comz4,comz6)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": ";\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n }\n i = grid_points[0] - 3;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i + 1][j][k] = lhs[0][i + 1][j][k] + comz1;\n lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4;\n lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz5;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)", "context_chars": 100, "text": " 3;\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i + 1][j][k] = lhs[0][i + 1][j][k] + comz1;\n lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4;\n lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz5;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "ng to \nc the first \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dttx2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i - 1][j][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i + 1][j][k];\n 
lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i - 1][j][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i + 1][j][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "/\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dttx2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i - 1][j][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i + 1][j][k];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i - 1][j][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i + 1][j][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dttx2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i - 1][j][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i + 1][j][k];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i - 1][j][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i + 1][j][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for private (k) firstprivate (dttx2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (ru1,j) firstprivate (dy1,dy3,dy5,dymax,c1c5,c3c4,con43)", "context_chars": 100, "text": "i = 1; i <= grid_points[0] - 2; i += 1) {\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n ru1 = c3c4 * rho_i[i][j][k];\n cv[j] = vs[i][j][k];\n rhoq[j] = (dy3 + con43 * ru1 > ((dy5 + c1c5 * ru1 > ((dymax + ru1 > dy1?dymax + ru1 : dy1))?dy5 + c1c5 * ru1 : ((dymax + ru1 > dy1?dymax + ru1 : dy1))))?dy3 + con43 * ru1 : ((dy5 + c1c5 * ru1 > ((dymax + ru1 > dy1?dymax + ru1 : dy1))?dy5 + c1c5 * ru1 : ((dymax + ru1 > dy1?dymax + ru1 : dy1)))));\n } #pragma omp parallel for private (ru1,j) firstprivate (dy1,dy3,dy5,dymax,c1c5,c3c4,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (dtty1,dtty2,c2dtty1)", "context_chars": 100, "text": "1?dymax + ru1 : 
dy1))?dy5 + c1c5 * ru1 : ((dymax + ru1 > dy1?dymax + ru1 : dy1)))));\n }\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n lhs[0][i][j][k] = 0.0;\n lhs[1][i][j][k] = -dtty2 * cv[j - 1] - dtty1 * rhoq[j - 1];\n lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];\n lhs[3][i][j][k] = dtty2 * cv[j + 1] - dtty1 * rhoq[j + 1];\n lhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for private (j) firstprivate (dtty1,dtty2,c2dtty1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n j = 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4;\n lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz6;\n lhs[3][i][j + 1][k] = lhs[3][i][j + 1][k] - comz4;\n lhs[4][i][j + 1][k] = lhs[4][i][j + 1][k] + comz1;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)", "context_chars": 100, "text": " 1;\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4;\n lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz6;\n lhs[3][i][j + 1][k] = lhs[3][i][j + 1][k] - comz4;\n lhs[4][i][j + 1][k] = lhs[4][i][j + 1][k] + comz1;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " lhs[3][i][j + 1][k] - comz4;\n lhs[4][i][j + 1][k] = lhs[4][i][j + 1][k] + comz1;\n }\n }\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 3; j <= grid_points[1] - 4; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "}\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 3; j <= grid_points[1] - 4; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n 
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 3; j <= grid_points[1] - 4; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": ";\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n }\n j = grid_points[1] - 3;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i][j + 1][k] = lhs[0][i][j + 1][k] + comz1;\n lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4;\n lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz5;\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)", "context_chars": 100, "text": " 3;\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i][j + 1][k] = lhs[0][i][j + 1][k] + comz1;\n lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4;\n lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz5;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz5,comz6,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "ctors \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dtty2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j - 1][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dtty2 * speed[i][j + 1][k];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dtty2 
* speed[i][j - 1][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j + 1][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "/\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dtty2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j - 1][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dtty2 * speed[i][j + 1][k];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j - 1][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j + 1][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dtty2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j - 1][k];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dtty2 * speed[i][j + 1][k];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j - 1][k];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j + 1][k];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for private (k) firstprivate (dtty2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (ru1,k) firstprivate (dz1,dz4,dz5,dzmax,c1c5,c3c4,con43)", "context_chars": 100, "text": "i = 1; i <= grid_points[0] - 2; i += 1) {\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n ru1 = c3c4 * rho_i[i][j][k];\n cv[k] = ws[i][j][k];\n rhos[k] = (dz4 + con43 * ru1 > ((dz5 + c1c5 * ru1 > ((dzmax + ru1 > dz1?dzmax + ru1 : dz1))?dz5 + c1c5 * ru1 : ((dzmax + ru1 > dz1?dzmax + ru1 : dz1))))?dz4 + con43 * ru1 : ((dz5 + c1c5 * ru1 > ((dzmax + ru1 > dz1?dzmax + ru1 : dz1))?dz5 + c1c5 * ru1 : ((dzmax + ru1 > dz1?dzmax + ru1 : dz1)))));\n } #pragma omp parallel for private (ru1,k) firstprivate (dz1,dz4,dz5,dzmax,c1c5,c3c4,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dttz1,dttz2,c2dttz1)", "context_chars": 100, "text": "1?dzmax + ru1 : dz1))?dz5 + c1c5 * ru1 : ((dzmax + ru1 > dz1?dzmax + ru1 : dz1)))));\n }\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n 
lhs[0][i][j][k] = 0.0;\n lhs[1][i][j][k] = -dttz2 * cv[k - 1] - dttz1 * rhos[k - 1];\n lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k];\n lhs[3][i][j][k] = dttz2 * cv[k + 1] - dttz1 * rhos[k + 1];\n lhs[4][i][j][k] = 0.0;\n } #pragma omp parallel for private (k) firstprivate (dttz1,dttz2,c2dttz1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n k = 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4;\n lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz6;\n lhs[3][i][j][k + 1] = lhs[3][i][j][k + 1] - comz4;\n lhs[4][i][j][k + 1] = lhs[4][i][j][k + 1] + comz1;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)", "context_chars": 100, "text": " 1;\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4;\n lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz6;\n lhs[3][i][j][k + 1] = lhs[3][i][j][k + 1] - comz4;\n lhs[4][i][j][k + 1] = lhs[4][i][j][k + 1] + comz1;\n } #pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " lhs[3][i][j][k + 1] - comz4;\n lhs[4][i][j][k + 1] = lhs[4][i][j][k + 1] + comz1;\n }\n }\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)\n for (k = 3; k <= grid_points[2] - 4; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "}\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)\n for (k = 3; k <= grid_points[2] - 4; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - 
comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 3; k <= grid_points[2] - 4; k += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for private (k) firstprivate (comz1,comz4,comz6)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": ";\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n }\n }\n k = grid_points[2] - 3;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i][j][k + 1] = lhs[0][i][j][k + 1] + comz1;\n lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4;\n lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz5;\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)", "context_chars": 100, "text": " 3;\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[0][i][j][k + 1] = lhs[0][i][j][k + 1] + comz1;\n lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4;\n lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz5;\n } #pragma omp parallel for private (j) firstprivate (comz1,comz4,comz5,comz6,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "her factors (u+c), (u-c) \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dttz2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k - 1];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k + 1];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k - 1];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - 
dttz2 * speed[i][j][k + 1];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "/\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dttz2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k - 1];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k + 1];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k - 1];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k + 1];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dttz2)", "context_chars": 100, "text": " \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n lhs[0 + 5][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k - 1];\n lhs[2 + 5][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k + 1];\n lhs[4 + 5][i][j][k] = lhs[4][i][j][k];\n lhs[0 + 10][i][j][k] = lhs[0][i][j][k];\n lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k - 1];\n lhs[2 + 10][i][j][k] = lhs[2][i][j][k];\n lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k + 1];\n lhs[4 + 10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for private (k) firstprivate (dttz2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)", "context_chars": 100, "text": "nt k;\n double r1;\n double r2;\n double r3;\n double r4;\n double r5;\n double t1;\n double t2;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r3;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = -r2;\n rhs[1][i][j][k] = r1;\n rhs[2][i][j][k] = bt * (r4 - r5);\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)", "context_chars": 100, "text": "llel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= 
grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r3;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = -r2;\n rhs[1][i][j][k] = r1;\n rhs[2][i][j][k] = bt * (r4 - r5);\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)", "context_chars": 100, "text": "el for private (r1,r2,r3,r4,r5,t1,t2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r3;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = -r2;\n rhs[1][i][j][k] = r1;\n rhs[2][i][j][k] = bt * (r4 - r5);\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)", "context_chars": 100, "text": "nt k;\n double r1;\n double r2;\n double r3;\n double r4;\n double r5;\n double t1;\n double t2;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r1;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = bt * (r4 - r5);\n rhs[1][i][j][k] = -r3;\n rhs[2][i][j][k] = r2;\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)", "context_chars": 100, "text": "llel for private (r1,r2,r3,r4,r5,t1,t2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r1;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = bt * (r4 - r5);\n rhs[1][i][j][k] = -r3;\n rhs[2][i][j][k] = r2;\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)", "context_chars": 100, "text": "el for private (r1,r2,r3,r4,r5,t1,t2,j,k)\n for (j = 1; 
j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = bt * r1;\n t2 = 0.5 * (r4 + r5);\n rhs[0][i][j][k] = bt * (r4 - r5);\n rhs[1][i][j][k] = -r3;\n rhs[2][i][j][k] = r2;\n rhs[3][i][j][k] = -t1 + t2;\n rhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for private (r1,r2,r3,r4,r5,t1,t2,k) firstprivate (bt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "dary \nc-------------------------------------------------------------------*/\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n rhs[m][i][j][k] = forcing[m][i][j][k];\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": "--------*/\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 0; i <= grid_points[0] - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n rhs[m][i][j][k] = forcing[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k)\n for (i = 0; i <= grid_points[0] - 1; i += 1) {\n \nfor (j = 0; j <= grid_points[1] - 1; j += 1) {\n \n#pragma omp parallel for private (k)\n for (k = 0; k <= grid_points[2] - 1; k += 1) {\n rhs[m][i][j][k] = forcing[m][i][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 0; j <= grid_points[1] - 1; j += 1) {\n \nfor (k = 0; k <= grid_points[2] - 1; k += 1) {\n rhs[m][i][j][k] = forcing[m][i][j][k];\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,i,j,k)", "context_chars": 100, "text": "te xi-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 
1][j][k];\n um1 = us[i - 1][j][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i + 1][j][k] - 2.0 * u[0][i][j][k] + u[0][i - 1][j][k]) - tx2 * (u[1][i + 1][j][k] - u[1][i - 1][j][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i + 1][j][k] - 2.0 * u[1][i][j][k] + u[1][i - 1][j][k]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[1][i + 1][j][k] * up1 - u[1][i - 1][j][k] * um1 + (u[4][i + 1][j][k] - square[i + 1][j][k] - u[4][i - 1][j][k] + square[i - 1][j][k]) * c2);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i + 1][j][k] - 2.0 * u[2][i][j][k] + u[2][i - 1][j][k]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[2][i + 1][j][k] * up1 - u[2][i - 1][j][k] * um1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i + 1][j][k] - 2.0 * u[3][i][j][k] + u[3][i - 1][j][k]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[3][i + 1][j][k] * up1 - u[3][i - 1][j][k] * um1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i + 1][j][k] - 2.0 * u[4][i][j][k] + u[4][i - 1][j][k]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[4][i + 1][j][k] * rho_i[i + 1][j][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i - 1][j][k] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[4][i + 1][j][k] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[4][i - 1][j][k] - c2 * square[i - 1][j][k]) * um1);\n }\n }\n } #pragma omp parallel for private (uijk,up1,um1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,j,k)", "context_chars": 100, "text": "parallel for private (uijk,up1,um1,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 1][j][k];\n um1 = us[i - 1][j][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i + 1][j][k] - 2.0 * u[0][i][j][k] + u[0][i - 1][j][k]) - tx2 * (u[1][i + 1][j][k] - u[1][i - 1][j][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i + 1][j][k] - 2.0 * u[1][i][j][k] + u[1][i - 1][j][k]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[1][i + 1][j][k] * up1 - u[1][i - 1][j][k] * um1 + (u[4][i + 1][j][k] - square[i + 1][j][k] - u[4][i - 1][j][k] + square[i - 1][j][k]) * c2);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i + 1][j][k] - 2.0 * u[2][i][j][k] + u[2][i - 1][j][k]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[2][i + 1][j][k] * up1 - u[2][i - 1][j][k] * um1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i + 1][j][k] - 2.0 * u[3][i][j][k] + u[3][i - 1][j][k]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[3][i + 1][j][k] * up1 - u[3][i - 1][j][k] * um1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i + 1][j][k] - 2.0 * u[4][i][j][k] + u[4][i - 1][j][k]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[4][i + 1][j][k] * rho_i[i + 1][j][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i - 1][j][k] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[4][i + 1][j][k] - c2 * square[i + 1][j][k]) * up1 - (c1 
* u[4][i - 1][j][k] - c2 * square[i - 1][j][k]) * um1);\n }\n } #pragma omp parallel for private (uijk,up1,um1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (uijk,up1,um1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n uijk = us[i][j][k];\n up1 = us[i + 1][j][k];\n um1 = us[i - 1][j][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i + 1][j][k] - 2.0 * u[0][i][j][k] + u[0][i - 1][j][k]) - tx2 * (u[1][i + 1][j][k] - u[1][i - 1][j][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i + 1][j][k] - 2.0 * u[1][i][j][k] + u[1][i - 1][j][k]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[1][i + 1][j][k] * up1 - u[1][i - 1][j][k] * um1 + (u[4][i + 1][j][k] - square[i + 1][j][k] - u[4][i - 1][j][k] + square[i - 1][j][k]) * c2);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i + 1][j][k] - 2.0 * u[2][i][j][k] + u[2][i - 1][j][k]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[2][i + 1][j][k] * up1 - u[2][i - 1][j][k] * um1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i + 1][j][k] - 2.0 * u[3][i][j][k] + u[3][i - 1][j][k]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[3][i + 1][j][k] * up1 - u[3][i - 1][j][k] * um1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i + 1][j][k] - 2.0 * u[4][i][j][k] + u[4][i - 1][j][k]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[4][i + 1][j][k] * rho_i[i + 1][j][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i - 1][j][k] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[4][i + 1][j][k] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[4][i - 1][j][k] - c2 * square[i - 1][j][k]) * um1);\n } #pragma omp parallel for private (uijk,up1,um1,k) firstprivate (tx2,xxcon2,xxcon3,xxcon4,xxcon5,dx1tx1,dx2tx1,dx3tx1,dx4tx1,dx5tx1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n i = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "/\n i = 1;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n } 
#pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n }\n i = 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "}\n i = 2;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 3 * 1; i <= grid_points[0] - 3 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 3 * 1; i <= grid_points[0] - 3 - 1; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": " parallel for private (i,j,k)\n for (i = 3 * 1; i <= grid_points[0] - 3 - 1; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "1][j][k] + u[m][i + 2][j][k]);\n }\n }\n }\n }\n i = grid_points[0] - 3;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "ints[0] - 3;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": " u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]);\n }\n }\n }\n i = grid_points[0] - 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 5.0 * u[m][i][j][k]);\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "ints[0] - 2;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,i)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 5.0 * u[m][i][j][k]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,i)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 5.0 * u[m][i][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,i,j,k)", "context_chars": 100, "text": "e eta-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j + 1][k] - 2.0 * u[0][i][j][k] + u[0][i][j - 1][k]) - ty2 * (u[2][i][j + 1][k] - u[2][i][j - 1][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j + 1][k] - 2.0 * u[1][i][j][k] + u[1][i][j - 1][k]) + yycon2 * (us[i][j + 1][k] - 2.0 
* us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[1][i][j + 1][k] * vp1 - u[1][i][j - 1][k] * vm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j + 1][k] - 2.0 * u[2][i][j][k] + u[2][i][j - 1][k]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[2][i][j + 1][k] * vp1 - u[2][i][j - 1][k] * vm1 + (u[4][i][j + 1][k] - square[i][j + 1][k] - u[4][i][j - 1][k] + square[i][j - 1][k]) * c2);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j + 1][k] - 2.0 * u[3][i][j][k] + u[3][i][j - 1][k]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[3][i][j + 1][k] * vp1 - u[3][i][j - 1][k] * vm1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j + 1][k] - 2.0 * u[4][i][j][k] + u[4][i][j - 1][k]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[4][i][j + 1][k] * rho_i[i][j + 1][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j - 1][k] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[4][i][j + 1][k] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[4][i][j - 1][k] - c2 * square[i][j - 1][k]) * vm1);\n }\n }\n } #pragma omp parallel for private (vijk,vp1,vm1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,j,k)", "context_chars": 100, "text": "parallel for private (vijk,vp1,vm1,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j + 1][k] - 2.0 * u[0][i][j][k] + u[0][i][j - 1][k]) - ty2 * (u[2][i][j + 1][k] - u[2][i][j - 1][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j + 1][k] - 2.0 * u[1][i][j][k] + u[1][i][j - 1][k]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[1][i][j + 1][k] * vp1 - u[1][i][j - 1][k] * vm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j + 1][k] - 2.0 * u[2][i][j][k] + u[2][i][j - 1][k]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[2][i][j + 1][k] * vp1 - u[2][i][j - 1][k] * vm1 + (u[4][i][j + 1][k] - square[i][j + 1][k] - u[4][i][j - 1][k] + square[i][j - 1][k]) * c2);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j + 1][k] - 2.0 * u[3][i][j][k] + u[3][i][j - 1][k]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[3][i][j + 1][k] * vp1 - u[3][i][j - 1][k] * vm1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j + 1][k] - 2.0 * u[4][i][j][k] + u[4][i][j - 1][k]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[4][i][j + 1][k] * rho_i[i][j + 1][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j - 1][k] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[4][i][j + 1][k] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[4][i][j - 1][k] - c2 * square[i][j - 1][k]) * vm1);\n }\n } #pragma omp parallel for private (vijk,vp1,vm1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate 
(ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (vijk,vp1,vm1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n vijk = vs[i][j][k];\n vp1 = vs[i][j + 1][k];\n vm1 = vs[i][j - 1][k];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j + 1][k] - 2.0 * u[0][i][j][k] + u[0][i][j - 1][k]) - ty2 * (u[2][i][j + 1][k] - u[2][i][j - 1][k]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j + 1][k] - 2.0 * u[1][i][j][k] + u[1][i][j - 1][k]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[1][i][j + 1][k] * vp1 - u[1][i][j - 1][k] * vm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j + 1][k] - 2.0 * u[2][i][j][k] + u[2][i][j - 1][k]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[2][i][j + 1][k] * vp1 - u[2][i][j - 1][k] * vm1 + (u[4][i][j + 1][k] - square[i][j + 1][k] - u[4][i][j - 1][k] + square[i][j - 1][k]) * c2);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j + 1][k] - 2.0 * u[3][i][j][k] + u[3][i][j - 1][k]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[3][i][j + 1][k] * vp1 - u[3][i][j - 1][k] * vm1);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j + 1][k] - 2.0 * u[4][i][j][k] + u[4][i][j - 1][k]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[4][i][j + 1][k] * rho_i[i][j + 1][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j - 1][k] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[4][i][j + 1][k] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[4][i][j - 1][k] - c2 * square[i][j - 1][k]) * vm1);\n } #pragma omp parallel for private (vijk,vp1,vm1,k) firstprivate (ty2,yycon2,yycon3,yycon4,yycon5,dy1ty1,dy2ty1,dy3ty1,dy4ty1,dy5ty1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "ion \nc-------------------------------------------------------------------*/\n j = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "/\n j = 1;\n \n#pragma omp parallel for private (i,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (i,k)\n for (i 
= 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n }\n j = 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "}\n j = 2;\n \n#pragma omp parallel for private (i,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 3 * 1; j <= grid_points[1] - 3 - 1; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for 
(j = 3 * 1; j <= grid_points[1] - 3 - 1; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 3 * 1; j <= grid_points[1] - 3 - 1; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp)", "context_chars": 100, "text": "arallel for private (j,k)\n for (j = 3 * 1; j <= grid_points[1] - 3 - 1; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": " + 1][k] + u[m][i][j + 2][k]);\n }\n }\n }\n }\n j = grid_points[1] - 3;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "ints[1] - 3;\n \n#pragma omp parallel for private (i,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]);\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * 
u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": " u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]);\n }\n }\n }\n j = grid_points[1] - 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 5.0 * u[m][i][j][k]);\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": "ints[1] - 2;\n \n#pragma omp parallel for private (i,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp,j)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 5.0 * u[m][i][j][k]);\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp,j)", "context_chars": 100, "text": "pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 5.0 * u[m][i][j][k]);\n } #pragma omp parallel for private (k) firstprivate (dssp,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (wm1,wijk,wp1,i,j,k)", "context_chars": 100, "text": " zeta-direction fluxes \nc-------------------------------------------------------------------*/\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (wm1,wijk,wp1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (wm1,wijk,wp1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k + 1] - 2.0 * u[0][i][j][k] + u[0][i][j][k - 1]) - tz2 * (u[3][i][j][k + 1] - u[3][i][j][k - 1]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k + 1] - 2.0 * u[1][i][j][k] + u[1][i][j][k - 1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[1][i][j][k + 1] * wp1 - u[1][i][j][k - 1] * wm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k + 1] - 2.0 * u[2][i][j][k] + u[2][i][j][k - 1]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[2][i][j][k + 1] * wp1 - u[2][i][j][k - 1] * wm1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k + 1] - 2.0 * u[3][i][j][k] + u[3][i][j][k - 1]) + zzcon2 * con43 * (wp1 - 
2.0 * wijk + wm1) - tz2 * (u[3][i][j][k + 1] * wp1 - u[3][i][j][k - 1] * wm1 + (u[4][i][j][k + 1] - square[i][j][k + 1] - u[4][i][j][k - 1] + square[i][j][k - 1]) * c2);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k + 1] - 2.0 * u[4][i][j][k] + u[4][i][j][k - 1]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[4][i][j][k + 1] * rho_i[i][j][k + 1] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j][k - 1] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[4][i][j][k + 1] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[4][i][j][k - 1] - c2 * square[i][j][k - 1]) * wm1);\n }\n }\n } #pragma omp parallel for private (wm1,wijk,wp1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (wm1,wijk,wp1,j,k)", "context_chars": 100, "text": "parallel for private (wm1,wijk,wp1,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (wm1,wijk,wp1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k + 1] - 2.0 * u[0][i][j][k] + u[0][i][j][k - 1]) - tz2 * (u[3][i][j][k + 1] - u[3][i][j][k - 1]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k + 1] - 2.0 * u[1][i][j][k] + u[1][i][j][k - 1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[1][i][j][k + 1] * wp1 - u[1][i][j][k - 1] * wm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k + 1] - 2.0 * u[2][i][j][k] + u[2][i][j][k - 1]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[2][i][j][k + 1] * wp1 - u[2][i][j][k - 1] * wm1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k + 1] - 2.0 * u[3][i][j][k] + u[3][i][j][k - 1]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[3][i][j][k + 1] * wp1 - u[3][i][j][k - 1] * wm1 + (u[4][i][j][k + 1] - square[i][j][k + 1] - u[4][i][j][k - 1] + square[i][j][k - 1]) * c2);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k + 1] - 2.0 * u[4][i][j][k] + u[4][i][j][k - 1]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[4][i][j][k + 1] * rho_i[i][j][k + 1] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j][k - 1] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[4][i][j][k + 1] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[4][i][j][k - 1] - c2 * square[i][j][k - 1]) * wm1);\n }\n } #pragma omp parallel for private (wm1,wijk,wp1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (wm1,wijk,wp1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)", "context_chars": 100, "text": "rallel for private (wm1,wijk,wp1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n wijk = ws[i][j][k];\n wp1 = ws[i][j][k + 1];\n wm1 = ws[i][j][k - 1];\n rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k + 1] - 2.0 * u[0][i][j][k] + u[0][i][j][k - 1]) - tz2 * (u[3][i][j][k + 1] - 
u[3][i][j][k - 1]);\n rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k + 1] - 2.0 * u[1][i][j][k] + u[1][i][j][k - 1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[1][i][j][k + 1] * wp1 - u[1][i][j][k - 1] * wm1);\n rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k + 1] - 2.0 * u[2][i][j][k] + u[2][i][j][k - 1]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[2][i][j][k + 1] * wp1 - u[2][i][j][k - 1] * wm1);\n rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k + 1] - 2.0 * u[3][i][j][k] + u[3][i][j][k - 1]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[3][i][j][k + 1] * wp1 - u[3][i][j][k - 1] * wm1 + (u[4][i][j][k + 1] - square[i][j][k + 1] - u[4][i][j][k - 1] + square[i][j][k - 1]) * c2);\n rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k + 1] - 2.0 * u[4][i][j][k] + u[4][i][j][k - 1]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[4][i][j][k + 1] * rho_i[i][j][k + 1] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j][k - 1] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[4][i][j][k + 1] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[4][i][j][k - 1] - c2 * square[i][j][k - 1]) * wm1);\n } #pragma omp parallel for private (wm1,wijk,wp1,k) firstprivate (tz2,zzcon2,zzcon3,zzcon4,zzcon5,dz1tz1,dz2tz1,dz3tz1,dz4tz1,dz5tz1,c1,c2,con43)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n k = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "/\n k = 1;\n \n#pragma omp parallel for private (i,j,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n } #pragma omp parallel for private (j) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", 
"context_chars": 100, "text": "m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n }\n k = 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "}\n k = 2;\n \n#pragma omp parallel for private (i,j,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (- 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n } #pragma omp parallel for private (j) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": " + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 3 * 1; k <= grid_points[2] - 3 - 1; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 3 * 1; k <= grid_points[2] - 3 - 1; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dssp)\n for (k = 3 * 1; k <= grid_points[2] - 3 - 1; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dssp)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 3 * 1; k <= grid_points[2] - 3 - 1; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);\n } #pragma omp parallel for private (k) firstprivate (dssp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": "][k + 1] + u[m][i][j][k + 2]);\n }\n }\n }\n }\n k = grid_points[2] - 3;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "ints[2] - 3;\n \n#pragma omp parallel for private (i,j,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]);\n } #pragma omp parallel for private (j) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": " u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]);\n }\n }\n 
}\n k = grid_points[2] - 2;\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]);\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": "ints[2] - 2;\n \n#pragma omp parallel for private (i,j,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (dssp,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]);\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (dssp,k)", "context_chars": 100, "text": "pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]);\n } #pragma omp parallel for private (j) firstprivate (dssp,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k,m)", "context_chars": 100, "text": "* (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]);\n }\n }\n }\n \nfor (m = 0; m <= 4; m += 1) {\n \n#pragma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n }\n }\n }\n } #pragma omp parallel for private (i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (i,j,k,m)\n for (m = 0; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n }\n }\n } #pragma omp parallel for private (i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "agma omp parallel for private (i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (dt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] * 
dt;\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (dt)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n } #pragma omp parallel for private (k) firstprivate (dt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,i,j,k)", "context_chars": 100, "text": ";\n double ww;\n double r1;\n double r2;\n double r3;\n double r4;\n double r5;\n double ac2inv;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,k) firstprivate (c2,bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n ru1 = rho_i[i][j][k];\n uu = us[i][j][k];\n vv = vs[i][j][k];\n ww = ws[i][j][k];\n ac = speed[i][j][k];\n ac2inv = ainv[i][j][k] * ainv[i][j][k];\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = c2 * ac2inv * (qs[i][j][k] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);\n t2 = bt * ru1 * (uu * r1 - r2);\n t3 = bt * ru1 * ac * t1;\n rhs[0][i][j][k] = r1 - t1;\n rhs[1][i][j][k] = -ru1 * (ww * r1 - r4);\n rhs[2][i][j][k] = ru1 * (vv * r1 - r3);\n rhs[3][i][j][k] = -t2 + t3;\n rhs[4][i][j][k] = t2 + t3;\n }\n }\n } #pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,j,k)", "context_chars": 100, "text": ",ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,k) firstprivate (c2,bt)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n ru1 = rho_i[i][j][k];\n uu = us[i][j][k];\n vv = vs[i][j][k];\n ww = ws[i][j][k];\n ac = speed[i][j][k];\n ac2inv = ainv[i][j][k] * ainv[i][j][k];\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = c2 * ac2inv * (qs[i][j][k] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);\n t2 = bt * ru1 * (uu * r1 - r2);\n t3 = bt * ru1 * ac * t1;\n rhs[0][i][j][k] = r1 - t1;\n rhs[1][i][j][k] = -ru1 * (ww * r1 - r4);\n rhs[2][i][j][k] = ru1 * (vv * r1 - r3);\n rhs[3][i][j][k] = -t2 + t3;\n rhs[4][i][j][k] = t2 + t3;\n }\n } #pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,k) firstprivate (c2,bt)", "context_chars": 100, "text": "c,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,j,k)\n for (j = 1; j <= grid_points[1] - 2; 
j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n ru1 = rho_i[i][j][k];\n uu = us[i][j][k];\n vv = vs[i][j][k];\n ww = ws[i][j][k];\n ac = speed[i][j][k];\n ac2inv = ainv[i][j][k] * ainv[i][j][k];\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n t1 = c2 * ac2inv * (qs[i][j][k] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);\n t2 = bt * ru1 * (uu * r1 - r2);\n t3 = bt * ru1 * ac * t1;\n rhs[0][i][j][k] = r1 - t1;\n rhs[1][i][j][k] = -ru1 * (ww * r1 - r4);\n rhs[2][i][j][k] = ru1 * (vv * r1 - r3);\n rhs[3][i][j][k] = -t2 + t3;\n rhs[4][i][j][k] = t2 + t3;\n } #pragma omp parallel for private (t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv,k) firstprivate (c2,bt)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,i,j,k)", "context_chars": 100, "text": "uble r3;\n double r4;\n double r5;\n double btuz;\n double acinv;\n double ac2u;\n double uzik1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,k) firstprivate (bt,c2iv)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n xvel = us[i][j][k];\n yvel = vs[i][j][k];\n zvel = ws[i][j][k];\n ac = speed[i][j][k];\n acinv = ainv[i][j][k];\n ac2u = ac * ac;\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n uzik1 = u[0][i][j][k];\n btuz = bt * uzik1;\n t1 = btuz * acinv * (r4 + r5);\n t2 = r3 + t1;\n t3 = btuz * (r4 - r5);\n rhs[0][i][j][k] = t2;\n rhs[1][i][j][k] = -uzik1 * r2 + xvel * t2;\n rhs[2][i][j][k] = uzik1 * r1 + yvel * t2;\n rhs[3][i][j][k] = zvel * t2 + t3;\n rhs[4][i][j][k] = uzik1 * (-xvel * r2 + yvel * r1) + qs[i][j][k] * t2 + c2iv * ac2u * t1 + zvel * t3;\n }\n }\n } #pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,j,k)", "context_chars": 100, "text": "l,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,k) firstprivate (bt,c2iv)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n xvel = us[i][j][k];\n yvel = vs[i][j][k];\n zvel = ws[i][j][k];\n ac = speed[i][j][k];\n acinv = ainv[i][j][k];\n ac2u = ac * ac;\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n uzik1 = u[0][i][j][k];\n btuz = bt * uzik1;\n t1 = btuz * acinv * (r4 + r5);\n t2 = r3 + t1;\n t3 = btuz * (r4 - r5);\n rhs[0][i][j][k] = t2;\n rhs[1][i][j][k] = -uzik1 * r2 + xvel * t2;\n rhs[2][i][j][k] = uzik1 * r1 + yvel * t2;\n rhs[3][i][j][k] = zvel * t2 + t3;\n rhs[4][i][j][k] = uzik1 * (-xvel * r2 + yvel * r1) + qs[i][j][k] * t2 + c2iv * ac2u * t1 + zvel * t3;\n }\n } #pragma omp parallel for private 
(t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,k) firstprivate (bt,c2iv)", "context_chars": 100, "text": "r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n xvel = us[i][j][k];\n yvel = vs[i][j][k];\n zvel = ws[i][j][k];\n ac = speed[i][j][k];\n acinv = ainv[i][j][k];\n ac2u = ac * ac;\n r1 = rhs[0][i][j][k];\n r2 = rhs[1][i][j][k];\n r3 = rhs[2][i][j][k];\n r4 = rhs[3][i][j][k];\n r5 = rhs[4][i][j][k];\n uzik1 = u[0][i][j][k];\n btuz = bt * uzik1;\n t1 = btuz * acinv * (r4 + r5);\n t2 = r3 + t1;\n t3 = btuz * (r4 - r5);\n rhs[0][i][j][k] = t2;\n rhs[1][i][j][k] = -uzik1 * r2 + xvel * t2;\n rhs[2][i][j][k] = uzik1 * r1 + yvel * t2;\n rhs[3][i][j][k] = zvel * t2 + t3;\n rhs[4][i][j][k] = uzik1 * (-xvel * r2 + yvel * r1) + qs[i][j][k] * t2 + c2iv * ac2u * t1 + zvel * t3;\n } #pragma omp parallel for private (t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,acinv,ac2u,uzik1,k) firstprivate (bt,c2iv)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "-----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n rhs_norm(xcr);\n \nfor (m = 0; m <= 4; m += 1) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " for (m = 0; m <= 4; m += 1) {\n xcr[m] = xcr[m] / dt;\n }\n *class = 'U';\n *verified = 1;\n \nfor (m = 0; m <= 4; m += 1) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,j,k,m)", "context_chars": 100, "text": " = 0;\n for (i = 0; i <= grid_points[0] - 3; i += 1) {\n i1 = i + 1;\n i2 = i + 2;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (fac1,k,m) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n }\n lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (fac1,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,k,m) firstprivate (n,i1,i2)", "context_chars": 100, "text": "omp parallel for private (fac1,j,k,m)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n }\n lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (fac1,k,m) firstprivate (n,i1,i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": " fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "hs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private 
(m)", "context_chars": 100, "text": "hs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,j,k,m)", "context_chars": 100, "text": "-----------------------------------*/\n i = grid_points[0] - 2;\n i1 = grid_points[0] - 1;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (fac1,fac2,k,m) firstprivate (i,n,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1.0 / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i1][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for private (fac1,fac2,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,k,m) firstprivate (i,n,i1)", "context_chars": 100, "text": "mp parallel for private (fac1,fac2,j,k,m)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1.0 / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i1][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private (fac1,fac2,k,m) firstprivate (i,n,i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": "k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac2)", "context_chars": 100, "text": "----------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i1][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,j,k)", "context_chars": 100, "text": " for (i = 0; i <= grid_points[0] - 3; i += 1) {\n i1 = i + 1;\n i2 = i + 2;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (fac1,k) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (fac1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,k) firstprivate (n,i1,i2)", "context_chars": 100, "text": "p parallel for private (fac1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (fac1,k) firstprivate (n,i1,i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,j,k)", "context_chars": 100, "text": "-----------------------------*/\n i = grid_points[0] - 2;\n i1 = grid_points[0] - 1;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (fac1,fac2,k) firstprivate (i,n,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i1][j][k];\n rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private (fac1,fac2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,k) firstprivate (i,n,i1)", "context_chars": 100, "text": " parallel for private (fac1,fac2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k];\n rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i1][j][k];\n rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k];\n } #pragma omp parallel for private (fac1,fac2,k) firstprivate (i,n,i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "------------------------*/\n i = grid_points[0] - 2;\n i1 = grid_points[0] - 1;\n n = 0;\n \nfor (m = 0; m <= 2; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (i,n,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": ";\n n = 0;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 2; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (i,n,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (i,n,i1)", "context_chars": 100, "text": "pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n } #pragma omp parallel for private (k) firstprivate (i,n,i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,j,k,m)", "context_chars": 100, "text": "][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n }\n }\n }\n \nfor (m = 3; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (n,k) firstprivate (i,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for private (n,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,j,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (n,j,k,m)\n for (m = 3; m <= 4; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (n,k) firstprivate (i,i1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n }\n } #pragma omp parallel for private (n,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,k) firstprivate (i,i1)", "context_chars": 100, "text": "agma omp 
parallel for private (n,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k];\n } #pragma omp parallel for private (n,k) firstprivate (i,i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k,m)", "context_chars": 100, "text": "= 0;\n for (i = grid_points[0] - 3; i >= 0; i += -1) {\n i1 = i + 1;\n i2 = i + 2;\n \nfor (m = 0; m <= 2; m += 1) {\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n }\n }\n } #pragma omp parallel for private (j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": "i + 2;\n \n#pragma omp parallel for private (j,k,m)\n for (m = 0; m <= 2; m += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (n,i1,i2)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n } #pragma omp parallel for private (k) firstprivate (n,i1,i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,i1,i2,i,j,k,m)", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n \nfor (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n for (i = grid_points[0] - 3; i >= 0; i += -1) {\n i1 = i + 1;\n i2 = i + 2;\n \n#pragma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n }\n }\n }\n } #pragma omp parallel for private (n,i1,i2,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j,k)", "context_chars": 100, "text": " for (i = grid_points[0] - 3; i >= 0; i += -1) {\n i1 = i + 1;\n i2 = i + 2;\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,i1,i2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n 
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n }\n } #pragma omp parallel for private (j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (n,i1,i2)", "context_chars": 100, "text": "ma omp parallel for private (j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k];\n } #pragma omp parallel for private (k) firstprivate (n,i1,i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,i,k,m)", "context_chars": 100, "text": " = 0;\n for (j = 0; j <= grid_points[1] - 3; j += 1) {\n j1 = j + 1;\n j2 = j + 2;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,k,m) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n }\n lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (fac1,i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,k,m) firstprivate (n,j1,j2)", "context_chars": 100, "text": "omp parallel for private (fac1,i,k,m)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n }\n lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (fac1,k,m) firstprivate (n,j1,j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": " fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "hs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": "hs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,i,k,m)", "context_chars": 100, "text": "-----------------------------------*/\n j = grid_points[1] - 2;\n j1 = grid_points[1] - 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,fac2,k,m) firstprivate (j,n,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j1][k];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private (fac1,fac2,i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,k,m) firstprivate (j,n,j1)", "context_chars": 100, "text": "mp parallel for private (fac1,fac2,i,k,m)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i][j1][k];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private (fac1,fac2,k,m) firstprivate (j,n,j1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": "k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac2)", "context_chars": 100, "text": "----------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j1][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];\n } #pragma omp parallel for private (m) firstprivate (fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,i,k)", "context_chars": 100, "text": " for (j = 0; j <= grid_points[1] - 3; j += 1) {\n j1 = j + 1;\n j2 = j + 2;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,k) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (fac1,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,k) firstprivate (n,j1,j2)", "context_chars": 100, "text": "p parallel for private (fac1,i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];\n } #pragma omp parallel for private (fac1,k) firstprivate (n,j1,j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,i,k)", "context_chars": 100, "text": "-----------------------------*/\n j = grid_points[1] - 2;\n j1 = grid_points[1] - 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,fac2,k) firstprivate (j,n,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j1][k];\n rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private (fac1,fac2,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,k) firstprivate (j,n,j1)", "context_chars": 100, "text": " parallel for private (fac1,fac2,i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];\n rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i][j1][k];\n rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];\n } #pragma omp parallel for private (fac1,fac2,k) firstprivate (j,n,j1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k,m)", "context_chars": 100, "text": "------------------------*/\n j = grid_points[1] - 2;\n j1 = grid_points[1] - 1;\n n = 0;\n \nfor (m = 0; m <= 2; m += 1) {\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (j,n,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private (i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": ";\n n = 0;\n \n#pragma omp parallel for private (i,k,m)\n for (m = 0; m <= 2; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (j,n,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (j,n,j1)", "context_chars": 100, "text": "pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n } #pragma omp parallel for private (k) firstprivate (j,n,j1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,i,k,m)", "context_chars": 100, "text": "][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n }\n }\n }\n \nfor (m = 3; m <= 4; m += 1) {\n \n#pragma omp parallel for private (n,i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (n,k) firstprivate (j,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for private (n,i,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,i,k)", "context_chars": 100, "text": " }\n }\n \n#pragma omp parallel for private (n,i,k,m)\n for (m = 3; m <= 4; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (n,k) firstprivate (j,j1)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n }\n } #pragma omp parallel for private (n,i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,k) firstprivate (j,j1)", "context_chars": 100, "text": "agma omp 
parallel for private (n,i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n n = (m - 3 + 1) * 5;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];\n } #pragma omp parallel for private (n,k) firstprivate (j,j1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j1,j2,i,j,k,m)", "context_chars": 100, "text": "hree factors\n--------------------------------------------------------------------*/\n n = 0;\n \nfor (m = 0; m <= 2; m += 1) {\n for (j = grid_points[1] - 3; j >= 0; j += -1) {\n j1 = j + 1;\n j2 = j + 2;\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n }\n }\n }\n } #pragma omp parallel for private (j1,j2,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": " for (j = grid_points[1] - 3; j >= 0; j += -1) {\n j1 = j + 1;\n j2 = j + 2;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (n,j1,j2)", "context_chars": 100, "text": "ma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n } #pragma omp parallel for private (k) firstprivate (n,j1,j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,j1,j2,i,j,k,m)", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n \nfor (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n for (j = grid_points[1] - 3; j >= 0; j += -1) {\n j1 = j + 1;\n j2 = j1 + 1;\n \n#pragma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n }\n }\n }\n } #pragma omp parallel for private (n,j1,j2,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,k)", "context_chars": 100, "text": " for (j = grid_points[1] - 3; j >= 0; j += -1) {\n j1 = j + 1;\n j2 = j1 + 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma 
omp parallel for private (k) firstprivate (n,j1,j2)\n for (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n }\n } #pragma omp parallel for private (i,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k) firstprivate (n,j1,j2)", "context_chars": 100, "text": "ma omp parallel for private (i,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (k = 1; k <= grid_points[2] - 2; k += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];\n } #pragma omp parallel for private (k) firstprivate (n,j1,j2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,fac1,i,j,k,m)", "context_chars": 100, "text": "\nc-------------------------------------------------------------------*/\n lhsz();\n n = 0;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,fac1,j,k,m)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = 0; k <= grid_points[2] - 3; k += 1) {\n k1 = k + 1;\n k2 = k + 2;\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (k1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n }\n lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (k2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];\n }\n }\n }\n } #pragma omp parallel for private (k1,k2,fac1,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,fac1,j,k,m)", "context_chars": 100, "text": "parallel for private (k1,k2,fac1,i,j,k,m)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = 0; k <= grid_points[2] - 3; k += 1) {\n k1 = k + 1;\n k2 = k + 2;\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (k1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n }\n lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (k2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (k1,k2,fac1,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": " fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k1)", "context_chars": 100, "text": "hs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (k1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (k2)", "context_chars": 100, "text": "hs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (k2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,i,j,m)", "context_chars": 100, "text": "-----------------------------------*/\n k = grid_points[2] - 2;\n k1 = grid_points[2] - 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,fac2,j,m) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j][k1];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for private (fac1,fac2,i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,j,m) firstprivate (k,n,k1)", "context_chars": 100, "text": "mp parallel for private (fac1,fac2,i,j,m)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m) firstprivate (fac1)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n }\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \n#pragma omp parallel for private (m)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i][j][k1];\n \n#pragma omp parallel for private (m) firstprivate (fac2)\n for (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];\n }\n } #pragma omp parallel for private (fac1,fac2,j,m) firstprivate (k,n,k1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac1)", "context_chars": 100, "text": "k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n } #pragma omp parallel for private (m) firstprivate (fac1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m)", "context_chars": 100, "text": " lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n } #pragma omp parallel for private (m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (m) firstprivate (fac2)", "context_chars": 100, "text": "----------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j][k1];\n \nfor (m = 0; m <= 2; m += 1) {\n rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];\n } #pragma omp parallel for private (m) firstprivate (fac2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,fac1,i,j,k)", "context_chars": 100, "text": "-----------------------------*/\n for (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,fac1,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = 0; k <= grid_points[2] - 3; k += 1) {\n k1 = k + 1;\n k2 = k + 2;\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];\n }\n }\n } #pragma omp parallel for private (k1,k2,fac1,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,fac1,j,k)", "context_chars": 100, "text": "rallel for private (k1,k2,fac1,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = 0; k <= grid_points[2] - 3; k += 1) {\n k1 = k + 1;\n k2 = k + 2;\n fac1 = 1. 
/ lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];\n lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];\n }\n } #pragma omp parallel for private (k1,k2,fac1,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,i,j)", "context_chars": 100, "text": "-----------------------------*/\n k = grid_points[2] - 2;\n k1 = grid_points[2] - 1;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (fac1,fac2,j) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n fac2 = 1. / lhs[n + 2][i][j][k1];\n rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];\n }\n } #pragma omp parallel for private (fac1,fac2,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (fac1,fac2,j) firstprivate (k,n,k1)", "context_chars": 100, "text": " parallel for private (fac1,fac2,i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n fac1 = 1. / lhs[n + 2][i][j][k];\n lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];\n lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];\n rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];\n lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];\n lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];\n rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n fac2 = 1. 
/ lhs[n + 2][i][j][k1];\n rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];\n } #pragma omp parallel for private (fac1,fac2,j) firstprivate (k,n,k1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j,m)", "context_chars": 100, "text": "------------------------*/\n k = grid_points[2] - 2;\n k1 = grid_points[2] - 1;\n n = 0;\n \nfor (m = 0; m <= 2; m += 1) {\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for private (i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": ";\n n = 0;\n \n#pragma omp parallel for private (i,j,m)\n for (m = 0; m <= 2; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (k,n,k1)", "context_chars": 100, "text": "pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n } #pragma omp parallel for private (j) firstprivate (k,n,k1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,i,j,m)", "context_chars": 100, "text": "][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n }\n }\n }\n \nfor (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n \n#pragma omp parallel for private (i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for private (n,i,j,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (i,j)", "context_chars": 100, "text": " parallel for private (n,i,j,m)\n for (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (j) firstprivate (k,n,k1)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n }\n } #pragma omp parallel for private (i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (j) firstprivate (k,n,k1)", "context_chars": 100, "text": "pragma omp parallel for private 
(i,j)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];\n } #pragma omp parallel for private (j) firstprivate (k,n,k1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,i,j,k,m)", "context_chars": 100, "text": "hree factors\nc-------------------------------------------------------------------*/\n n = 0;\n \nfor (m = 0; m <= 2; m += 1) {\n \n#pragma omp parallel for private (k1,k2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n }\n }\n } #pragma omp parallel for private (k1,k2,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,i,j,k)", "context_chars": 100, "text": "= 0;\n \n#pragma omp parallel for private (k1,k2,i,j,k,m)\n for (m = 0; m <= 2; m += 1) {\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n }\n } #pragma omp parallel for private (k1,k2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,j,k)", "context_chars": 100, "text": "mp parallel for private (k1,k2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n } #pragma omp parallel for private (k1,k2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (n,k1,k2,i,j,k,m)", "context_chars": 100, "text": " And the remaining two\nc-------------------------------------------------------------------*/\n \nfor (m = 3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n \n#pragma omp parallel for private (k1,k2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n }\n }\n } #pragma omp parallel for private (n,k1,k2,i,j,k,m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,i,j,k)", "context_chars": 100, "text": "l for private (n,k1,k2,i,j,k,m)\n for (m = 
3; m <= 4; m += 1) {\n n = (m - 3 + 1) * 5;\n \nfor (i = 1; i <= grid_points[0] - 2; i += 1) {\n \n#pragma omp parallel for private (k1,k2,j,k)\n for (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n }\n } #pragma omp parallel for private (k1,k2,i,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private (k1,k2,j,k)", "context_chars": 100, "text": "mp parallel for private (k1,k2,i,j,k)\n for (i = 1; i <= grid_points[0] - 2; i += 1) {\n \nfor (j = 1; j <= grid_points[1] - 2; j += 1) {\n for (k = grid_points[2] - 3; k >= 0; k += -1) {\n k1 = k + 1;\n k2 = k + 2;\n rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];\n }\n } #pragma omp parallel for private (k1,k2,j,k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (j)", "context_chars": 100, "text": ".0 structured OpenMP C version - EP Benchmark\\n\");\n sprintf(size,\"%12.0f\",(pow(2.0,(28 + 1))));\n \nfor (j = 13; j >= 1; j += -1) {\n if (size[j] == '.') \n size[j] = ' ';\n } #pragma omp parallel for private (j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "iminated as dead code.\n*/\n vranlc(0,&dum[0],dum[1],&dum[2]);\n dum[0] = randlc(&dum[1],dum[2]);\n \nfor (i = 0; i <= 131071; i += 1) {\n x[i] = - 1.0e99;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n t2 = randlc(&t1,t1);\n }\n an = t1;\n tt = 271828183.0;\n gc = 0.0;\n sx = 0.0;\n sy = 0.0;\n \nfor (i = 0; i <= 9; i += 1) {\n q[i] = 0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " int kk;\n int i;\n int ik;\n int l;\n/* private copy of q[0:NQ-1] */\n double qq[10];\n \nfor (i = 0; i <= 9; i += 1) {\n qq[i] = 0.0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) ", "context_chars": 100, "text": " \n#pragma omp parallel for private (i)\n for (i = 0; i <= 9; i += 1) {\n qq[i] = 0.0;\n }\n//for (k = 1; k <= np; k += 1) {\n kk = k_offset + k;\n t1 = 271828183.0;\n t2 = an;\n/* Find starting seed t1 for this kk. */\n for (i = 1; i <= 100; i += 1) {\n ik = kk / 2;\n if (2 * ik != kk) \n t3 = randlc(&t1,t2);\n if (ik == 0) \n break; \n t3 = randlc(&t2,t2);\n kk = ik;\n }\n/* Compute uniform pseudorandom numbers. 
*/\n if (0 == 1) \n timer_start(3);\n vranlc(2 * (1 << 16),&t1,1220703125.0,x - 1);\n if (0 == 1) \n timer_stop(3);\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. This loop is not \nc vectorizable.\n*/\n if (0 == 1) \n timer_start(2);\n for (i = 0; i <= 65535; i += 1) {\n x1 = 2.0 * x[2 * i] - 1.0;\n x2 = 2.0 * x[2 * i + 1] - 1.0;\n t1 = x1 * x1 + x2 * x2;\n if (t1 <= 1.0) {\n t2 = sqrt(- 2.0 * log(t1) / t1);\n/* Xi */\n t3 = x1 * t2;\n/* Yi */\n t4 = x2 * t2;\n l = ((fabs(t3) > fabs(t4)?fabs(t3) : fabs(t4)));\n/* counts */\n qq[l] += 1.0;\n/* sum of Xi */\n sx = sx + t3;\n/* sum of Yi */\n sy = sy + t4;\n }\n }\n if (0 == 1) \n timer_stop(2);\n } #pragma omp parallel for copyin(x, qq) private(x1, x2, t1, t2, t3, t4, ik, kk, i, l) reduction(+:sx) reduction(+:sy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "\n sy = sy + t4;\n }\n }\n if (0 == 1) \n timer_stop(2);\n }\n{\n \nfor (i = 0; i <= 9; i += 1) {\n q[i] += qq[i];\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:gc)", "context_chars": 100, "text": " nthreads = omp_get_num_threads();\n//#endif /* _OPENMP */ \n/* end of parallel region */\n }\n \nfor (i = 0; i <= 9; i += 1) {\n gc = gc + q[i];\n } #pragma omp parallel for private (i) reduction (+:gc)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "d defaults\\n\");\n lt = 8;\n nit = 4;\n nx[lt] = 256;\n ny[lt] = 256;\n nz[lt] = 256;\n \nfor (i = 0; i <= 7; i += 1) {\n debug_vec[i] = 0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (k)", "context_chars": 100, "text": " k += -1) {\n nx[k] = nx[k + 1] / 2;\n ny[k] = ny[k + 1] / 2;\n nz[k] = nz[k + 1] / 2;\n }\n \nfor (k = 1; k <= lt; k += 1) {\n m1[k] = nx[k] + 2;\n m2[k] = nz[k] + 2;\n m3[k] = ny[k] + 2;\n } #pragma omp parallel for private (k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2,i3) firstprivate (n3)", "context_chars": 100, "text": "---------------------------------------------*/\n int i1;\n int i2;\n int i3;\n/* axis = 1 */\n{\n \nfor (i3 = 1; i3 <= n3 - 1 - 1; i3 += 1) {\n \n#pragma omp parallel for private (i2)\n for (i2 = 1; i2 <= n2 - 1 - 1; i2 += 1) {\n u[i3][i2][n1 - 1] = u[i3][i2][1];\n u[i3][i2][0] = u[i3][i2][n1 - 2];\n }\n// } #pragma omp parallel for private (i1,i2,i3) firstprivate (n3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i2)", "context_chars": 100, "text": "allel for private (i1,i2,i3) firstprivate (n3)\n for (i3 = 1; i3 <= n3 - 1 - 1; i3 += 1) {\n \nfor (i2 = 1; i2 <= n2 - 1 - 1; i2 += 1) {\n u[i3][i2][n1 - 1] = u[i3][i2][1];\n 
u[i3][i2][0] = u[i3][i2][n1 - 2];\n } #pragma omp parallel for private (i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1)", "context_chars": 100, "text": " 2];\n }\n// }\n/* axis = 2 */\n//#pragma omp for\n// for ( i3 = 1; i3 < n3-1; i3++) {\n \nfor (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n u[i3][n2 - 1][i1] = u[i3][1][i1];\n u[i3][0][i1] = u[i3][n2 - 2][i1];\n } #pragma omp parallel for private (i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2) firstprivate (n1,n2)", "context_chars": 100, "text": " 1][i1] = u[i3][1][i1];\n u[i3][0][i1] = u[i3][n2 - 2][i1];\n }\n }\n/* axis = 3 */\n \nfor (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \n#pragma omp parallel for private (i1) firstprivate (n3)\n for (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n u[n3 - 1][i2][i1] = u[1][i2][i1];\n u[0][i2][i1] = u[n3 - 2][i2][i1];\n }\n } #pragma omp parallel for private (i1,i2) firstprivate (n1,n2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1) firstprivate (n3)", "context_chars": 100, "text": " parallel for private (i1,i2) firstprivate (n1,n2)\n for (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \nfor (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n u[n3 - 1][i2][i1] = u[1][i2][i1];\n u[0][i2][i1] = u[n3 - 2][i2][i1];\n } #pragma omp parallel for private (i1) firstprivate (n3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "oks for twenty candidates\nc-------------------------------------------------------------------*/\n \nfor (i = 0; i <= 9; i += 1) {\n ten[i][1] = 0.0;\n j1[i][1] = 0;\n j2[i][1] = 0;\n j3[i][1] = 0;\n ten[i][0] = 1.0;\n j1[i][0] = 0;\n j2[i][0] = 0;\n j3[i][0] = 0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2,i3)", "context_chars": 100, "text": "r\\n\");\n for (i = MM-1; i >= 0; i--) {\n\tprintf(\" %4d\", jg[0][i][1]);\n }\n printf(\"\\n\");*/\n \nfor (i3 = 0; i3 <= n3 - 1; i3 += 1) {\n \n#pragma omp parallel for private (i1,i2)\n for (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \n#pragma omp parallel for private (i1)\n for (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n }\n }\n } #pragma omp parallel for private (i1,i2,i3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2)", "context_chars": 100, "text": "\\n\");*/\n \n#pragma omp parallel for private (i1,i2,i3)\n for (i3 = 0; i3 <= n3 - 1; i3 += 1) {\n \nfor (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \n#pragma omp parallel for private (i1)\n for (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n }\n } #pragma omp parallel for private (i1,i2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1)", 
"context_chars": 100, "text": "1) {\n \n#pragma omp parallel for private (i1,i2)\n for (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \nfor (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n } #pragma omp parallel for private (i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2,i3) firstprivate (n2,n3)", "context_chars": 100, "text": "----------------------------------------------------------------*/\n int i1;\n int i2;\n int i3;\n \nfor (i3 = 0; i3 <= n3 - 1; i3 += 1) {\n \n#pragma omp parallel for private (i1,i2) firstprivate (n1)\n for (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \n#pragma omp parallel for private (i1)\n for (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n }\n }\n } #pragma omp parallel for private (i1,i2,i3) firstprivate (n2,n3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1,i2) firstprivate (n1)", "context_chars": 100, "text": "p parallel for private (i1,i2,i3) firstprivate (n2,n3)\n for (i3 = 0; i3 <= n3 - 1; i3 += 1) {\n \nfor (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \n#pragma omp parallel for private (i1)\n for (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n }\n } #pragma omp parallel for private (i1,i2) firstprivate (n1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private (i1)", "context_chars": 100, "text": "omp parallel for private (i1,i2) firstprivate (n1)\n for (i2 = 0; i2 <= n2 - 1; i2 += 1) {\n \nfor (i1 = 0; i1 <= n1 - 1; i1 += 1) {\n z[i3][i2][i1] = 0.0;\n } #pragma omp parallel for private (i1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (*:R23,T23)", "context_chars": 100, "text": " int i;\n int j;\n if (KS == 0) {\n R23 = 1.0;\n R46 = 1.0;\n T23 = 1.0;\n T46 = 1.0;\n \nfor (i = 1; i <= 23; i += 1) {\n R23 = 0.50 * R23;\n T23 = 2.0 * T23;\n } #pragma omp parallel for private (i) reduction (*:R23,T23)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (*:R46,T46)", "context_chars": 100, "text": "3,T23)\n for (i = 1; i <= 23; i += 1) {\n R23 = 0.50 * R23;\n T23 = 2.0 * T23;\n }\n \nfor (i = 1; i <= 46; i += 1) {\n R46 = 0.50 * R46;\n T46 = 2.0 * T46;\n } #pragma omp parallel for private (i) reduction (*:R46,T46)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i) reduction (+:j)", "context_chars": 100, "text": "ff2[i];\n }\n/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */\n j = 0;\n \nfor (i = 1; i <= 8388607; i += 1) {\n if (key_array[i - 1] > key_array[i]) \n j++;\n } #pragma omp parallel for private (i) reduction (+:j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "st keys are, load into 
*/\n/* top of array bucket_size */\n \nfor (i = 0; i <= 4; i += 1) {\n partial_verify_vals[i] = key_array[test_index_array[i]];\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " partial_verify_vals[i] = key_array[test_index_array[i]];\n }\n/* Clear the work array */\n \nfor (i = 0; i <= 524287; i += 1) {\n key_buff1[i] = 0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "arallel for private (i)\n for (i = 0; i <= 524287; i += 1) {\n key_buff1[i] = 0;\n }\n }\n \nfor (i = 0; i <= 524287; i += 1) {\n prv_buff1[i] = 0;\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": " */\n for (i = 0; i <= 524286; i += 1) {\n prv_buff1[i + 1] += prv_buff1[i];\n }\n{\n \nfor (i = 0; i <= 524287; i += 1) {\n key_buff1[i] += prv_buff1[i];\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Autopar/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for private (i)", "context_chars": 100, "text": "double timecounter;\n double maxtime;\n/* Initialize the verification arrays if a valid class */\n \nfor (i = 0; i <= 4; i += 1) {\n switch('A'){\n case 'S':\n test_index_array[i] = S_test_index_array[i];\n test_rank_array[i] = S_test_rank_array[i];\n break; \n case 'A':\n test_index_array[i] = A_test_index_array[i];\n test_rank_array[i] = A_test_rank_array[i];\n break; \n case 'W':\n test_index_array[i] = W_test_index_array[i];\n test_rank_array[i] = W_test_rank_array[i];\n break; \n case 'B':\n test_index_array[i] = B_test_index_array[i];\n test_rank_array[i] = B_test_rank_array[i];\n break; \n case 'C':\n test_index_array[i] = C_test_index_array[i];\n test_rank_array[i] = C_test_rank_array[i];\n break; \n }\n } #pragma omp parallel for private (i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB009-lastprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i) lastprivate(x)", "context_chars": 100, "text": "tus private(i) \n\t#pragma cetus lastprivate(x) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i) lastprivate(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) 
\n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, temp) reduction(+: sum)", "context_chars": 100, "text": "i, j, temp) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: sum) \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j, temp) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j, temp) reduction(+: sum)", "context_chars": 100, "text": "temp) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j, temp) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+: numNodes2)", "context_chars": 100, "text": "te(i) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: numNodes2) \n\t#pragma cetus parallel \n\tfor (i=(numNodes-1); i>( - 1); -- i)\n\t{\n\t\tif (x[i]<=0)\n\t\t{\n\t\t\tnumNodes2 -- ;\n\t\t}\n\t} #pragma omp parallel for private(i) reduction(+: numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "toi(argv[1]);\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, temp) reduction(+: sum)", "context_chars": 100, "text": "i, j, temp) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: sum) 
\n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j, temp) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j, temp) reduction(+: sum)", "context_chars": 100, "text": "temp) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j, temp) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "a, 0, sizeof a);\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<20; i+=1)\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<20; j+=1)\n\t\t{\n\t\t\ta[i][j]+=((i+j)+0.1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "<20; i+=1)\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<20; j+=1)\n\t\t{\n\t\t\ta[i][j]+=((i+j)+0.1);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "-1); i+=1)\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<20; j+=1)\n\t\t{\n\t\t\ta[i][j]+=a[i+1][j];\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB071-targetparallelfor-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB071-targetparallelfor-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "+ )\n\t{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB034-truedeplinear-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 
100, "text": "\t/* initialize segments touched by indexSet */\n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=521; i<=2025; ++ i)\n\t{\n\t\tbase[i]=(0.5*i);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB002-antidep1-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "[i][j]=i;\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB093-doall2-collapse-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "ut bounds of j */\n\t#pragma cetus private(i, j) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB059-lastprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i) lastprivate(x)", "context_chars": 100, "text": "etus private(i) \n\t#pragma cetus lastprivate(x) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\tx=i;\n\t} #pragma omp parallel for private(i) lastprivate(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB037-truedepseconddimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "j]=(i+j);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB067-restrictpointer1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "nt length)\n{\n\tint i;\n\t#pragma cetus private(i) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<=(length-1); i+=1)\n\t{\n\t\tnewSxx[i]=0.0;\n\t\tnewSyy[i]=0.0;\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB114-if-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB001-antidep1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB024-simdtruedep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus 
private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "0;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "ence calculation */\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "j]=(i+j);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]+=1;\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB095-doall2-taskloop-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]+=1;\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB054-inneronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=1; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "0;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "*/\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, i2) reduction(+: sum)", "context_chars": 100, "text": "vate(i, i2) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: sum) \n\t#pragma cetus parallel \n\tfor (i2=0; i2 #pragma omp parallel for private(i, i2) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+: sum)", "context_chars": 100, "text": "te(i) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\tfor (i=i2; i<(((i2+256) #pragma omp parallel for private(i) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+: sum2)", "context_chars": 100, "text": "private(i) \n\t#pragma loop name main#2 \n\t#pragma cetus reduction(+: sum2) \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i) reduction(+: sum2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB084-threadprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "ence calculation */\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=1; i<=1000; i ++ )\n\t{\n\t\t\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i 
#pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " b, int N)\n{\n\tint i;\n\t#pragma cetus private(i) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "toi(argv[1]);\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB032-truedepfirstdimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=1; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "0;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=1; i<=1000; i ++ )\n\t{\n\t\t\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "ence calculation */\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=1; i<=1000; i ++ )\n\t{\n\t\t\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus lastprivate(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for lastprivate(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=((i*j)+0.01);\n\t\t}\n\t\tv_out[i]=((i*j)+0.01);\n\t\tv[i]=((i*j)+0.01);\n\t} #pragma omp parallel for private(i, j)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for lastprivate(j)", "context_chars": 100, "text": "i ++ )\n\t{\n\t\t#pragma cetus lastprivate(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=((i*j)+0.01);\n\t\t} #pragma omp parallel for lastprivate(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": ";\n\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name mv#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\tdouble sum = 0.0;\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name mv#0#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j) reduction(+: sum)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\tsum+=(a[i][j]*v[j]);\n\t\t}\n\t\tv_out[i]=sum;\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+: sum)", "context_chars": 100, "text": "vate(j) \n\t\t#pragma loop name mv#0#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\tsum+=(a[i][j]*v[j]);\n\t\t} #pragma omp parallel for private(j) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB026-targetparallelfor-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB094-doall2-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB010-lastprivatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i) lastprivate(x)", "context_chars": 100, "text": "tus private(i) \n\t#pragma cetus lastprivate(x) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel 
\n\tfor (i=0; i #pragma omp parallel for private(i) lastprivate(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, x) reduction(+: pi)", "context_chars": 100, "text": "rivate(i, x) \n\t#pragma loop name main#0 \n\t#pragma cetus reduction(+: pi) \n\t#pragma cetus parallel \n\tfor (i=0; i<2000000000; i ++ )\n\t{\n\t\tx=((i+0.5)*interval_width);\n\t\tpi+=(1.0/((x*x)+1.0));\n\t} #pragma omp parallel for private(i, x) reduction(+: pi)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, tmp)", "context_chars": 100, "text": "{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i, tmp) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "[i][j]=i;\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB073-doall2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t} #pragma omp 
parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t/* initialize segments touched by indexSet */\n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=521; i<=2025; ++ i)\n\t{\n\t\tbase[i]=(0.5*i);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB030-truedep1-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ", int len)\n{\n\tint i;\n\t#pragma cetus private() \n\t#pragma loop name foo1#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "i;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; ++ i)\n\t{\n\t\tc[i]=(i+1.01);\n\t\to1[i]=(i+1.01);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": ")i)/7.0);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, 
"text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t\tb[i][j]=i;\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=i;\n\t\t\tb[i][j]=i;\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "[i][j]=i;\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "i][j]+1);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#2 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#2#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\tb[i][j]=(b[i][j]+1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#2#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\tb[i][j]=(b[i][j]+1);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "i;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=(i*40);\n\t\tb[i]=(i-1);\n\t\tc[i]=i;\n\t} #pragma omp parallel for private(i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "(i-1);\n\t\tc[i]=i;\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=(b[i]*c[i]);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, tmp)", "context_chars": 100, "text": "c int tmp;\n\t\t#pragma cetus private(i, tmp) \n\t\t#pragma loop name main#1 \n\t\t#pragma cetus parallel \n\t\tfor (i=0; i #pragma omp parallel for private(i, tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, tmp)", "context_chars": 100, "text": "\t\tint tmp;\n\t\t#pragma cetus private(i, tmp) \n\t\t#pragma loop name main#2 \n\t\t#pragma cetus parallel \n\t\tfor (i=0; i #pragma omp parallel for private(i, tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<1000; i ++ )\n\t{\n\t\t#pragma cetus lastprivate(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for lastprivate(j)\n\t\tfor (j=0; j<1000; j ++ )\n\t\t{\n\t\t\ta[i][j]=((i*j)+0.01);\n\t\t}\n\t\tv_out[i]=((i*j)+0.01);\n\t\tv[i]=((i*j)+0.01);\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for lastprivate(j)", "context_chars": 100, "text": "i ++ )\n\t{\n\t\t#pragma cetus lastprivate(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<1000; j ++ )\n\t\t{\n\t\t\ta[i][j]=((i*j)+0.01);\n\t\t} #pragma omp parallel for lastprivate(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": " mv()\n{\n\tint i, j;\n\t#pragma cetus private(i, j) \n\t#pragma loop name mv#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<1000; i ++ )\n\t{\n\t\tdouble sum = 0.0;\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name mv#0#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j) reduction(+: sum)\n\t\tfor (j=0; j<1000; j ++ )\n\t\t{\n\t\t\tsum+=(a[i][j]*v[j]);\n\t\t}\n\t\tv_out[i]=sum;\n\t} #pragma omp 
parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j) reduction(+: sum)", "context_chars": 100, "text": "vate(j) \n\t\t#pragma loop name mv#0#0 \n\t\t#pragma cetus reduction(+: sum) \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<1000; j ++ )\n\t\t{\n\t\t\tsum+=(a[i][j]*v[j]);\n\t\t} #pragma omp parallel for private(j) reduction(+: sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB029-truedep1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "0;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+: c[i+j])", "context_chars": 100, "text": "ivate(i) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: c[i+j]) \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i) reduction(+: c[i+j])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, tmp)", "context_chars": 100, "text": "{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i, tmp) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": " n, int g)\n{\n\tint i;\n\t#pragma cetus private(i) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "i;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=i;\n\t} #pragma omp parallel for private(i)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "ut bounds of j */\n\t#pragma cetus private(i, j) \n\t#pragma loop name foo#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB047-doallchar-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "i;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=(i%120);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB047-doallchar-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "\n\t\ta[i]=(i%120);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=(a[i]+1);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "5;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "+ )\n\t{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma 
omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "-1); i+=1)\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t/* initialize segments touched by indexSet */\n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=521; i<=2025; ++ i)\n\t{\n\t\tbase[i]=(0.5*i);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(((double)i)*j);\n\t\t\tb[i][j]=(((double)i)*j);\n\t\t\tc[i][j]=(((double)i)*j);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name init#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(((double)i)*j);\n\t\t\tb[i][j]=(((double)i)*j);\n\t\t\tc[i][j]=(((double)i)*j);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j, k)", "context_chars": 100, "text": "nt _ret_val_0;\n\t#pragma cetus private(i, j, k) \n\t#pragma loop name mmm#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j, k) \n\t\t#pragma loop name mmm#0#0 \n\t\t#pragma cetus reduction(+: c[i][j]) \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j, k) reduction(+: c[i][j])\n\t\tfor (k=0; k<100; k ++ )\n\t\t{\n\t\t\t#pragma cetus private(j) \n\t\t\t#pragma loop name mmm#0#0#0 \n\t\t\t#pragma cetus parallel \n\t\t\t#pragma omp parallel for private(j)\n\t\t\tfor (j=0; j<100; j ++ )\n\t\t\t{\n\t\t\t\tc[i][j]=(c[i][j]+(a[i][k]*b[k][j]));\n\t\t\t}\n\t\t}\n\t} #pragma omp parallel for private(i, j, k)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j, k) reduction(+: c[i][j])", "context_chars": 100, "text": "k) \n\t\t#pragma loop name mmm#0#0 \n\t\t#pragma cetus reduction(+: c[i][j]) \n\t\t#pragma cetus parallel \n\t\tfor (k=0; k<100; k ++ )\n\t\t{\n\t\t\t#pragma cetus private(j) \n\t\t\t#pragma loop name mmm#0#0#0 \n\t\t\t#pragma cetus parallel \n\t\t\t#pragma omp 
parallel for private(j)\n\t\t\tfor (j=0; j<100; j ++ )\n\t\t\t{\n\t\t\t\tc[i][j]=(c[i][j]+(a[i][k]*b[k][j]));\n\t\t\t}\n\t\t} #pragma omp parallel for private(j, k) reduction(+: c[i][j])"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "++ )\n\t\t{\n\t\t\t#pragma cetus private(j) \n\t\t\t#pragma loop name mmm#0#0#0 \n\t\t\t#pragma cetus parallel \n\t\t\tfor (j=0; j<100; j ++ )\n\t\t\t{\n\t\t\t\tc[i][j]=(c[i][j]+(a[i][k]*b[k][j]));\n\t\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\t/* initialize segments touched by indexSet */\n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=521; i<=2025; ++ i)\n\t{\n\t\tbase[i]=(0.5*i);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "*/\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i) reduction(+: numNodes2)", "context_chars": 100, "text": "te(i) \n\t#pragma loop name main#1 \n\t#pragma cetus reduction(+: numNodes2) \n\t#pragma cetus parallel \n\tfor (i=(numNodes-1); i>( - 1); -- i)\n\t{\n\t\tif (x[i]<=0)\n\t\t{\n\t\t\tnumNodes2 -- ;\n\t\t}\n\t} #pragma omp parallel for private(i) reduction(+: numNodes2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(i+j);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "j]=(i+j);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name 
main#1#0 \n\t\t#pragma cetus parallel \n\t\t#pragma omp parallel for private(j)\n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t}\n\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB046-doall2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "00; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j<100; j ++ )\n\t\t{\n\t\t\ta[i][j]=(a[i][j]+1);\n\t\t} #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB025-simdtruedep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "n=atoi(argv[1]);\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB033-truedeplinear-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "];\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<2000; i ++ )\n\t{\n\t\ta[i]=i;\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB045-doall1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "i;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=i;\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB045-doall1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "+ )\n\t{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<100; i ++ )\n\t{\n\t\ta[i]=(a[i]+1);\n\t} #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " \\n\");\n\t\t_ret_val_0=1;\n\t\treturn _ret_val_0;\n\t}\n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=521; i<=2025; ++ i)\n\t{\n\t\tbase[i]=0.0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "strict c)\n{\n\tint i;\n\t#pragma cetus private(i) \n\t#pragma loop name init#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "estrict c)\n{\n\tint i;\n\t#pragma cetus private(i) \n\t#pragma loop name foo#0 
\n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB040-truedepsingleelement-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "gv[1]);\n\t}\n\ta[0]=2;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "\tint _ret_val_0;\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=1; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "toi(argv[1]);\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "en; i ++ )\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#0#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "-1); i+=1)\n\t{\n\t\t#pragma cetus private(j) \n\t\t#pragma loop name main#1#0 \n\t\t#pragma cetus parallel \n\t\tfor (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB104-nowait-barrier-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "5;\n\tint _ret_val_0;\n\t#pragma cetus private(i) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB104-nowait-barrier-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i)", "context_chars": 100, "text": "+ )\n\t{\n\t\ta[i]=i;\n\t}\n\t#pragma cetus private(i) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "toi(argv[1]);\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#0 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/dataracebench/DRB038-truedepseconddimension-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": "j]=(i+j);\n\t\t}\n\t}\n\t#pragma cetus private(i, j) \n\t#pragma loop name main#1 \n\t#pragma cetus parallel \n\tfor (i=0; i #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: in_final_sum)", "context_chars": 100, "text": "#pragma loop name kernel#3 \n\t\t#pragma cetus reduction(+: in_final_sum) \n\t\t#pragma cetus parallel \n\t\tfor (i=0; i #pragma omp parallel for reduction(+: in_final_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: in_sqr_final_sum)", "context_chars": 100, "text": "gma loop name kernel#4 \n\t\t#pragma cetus reduction(+: in_sqr_final_sum) \n\t\t#pragma cetus parallel \n\t\tfor (i=0; i #pragma omp parallel for reduction(+: in_sqr_final_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/kernel.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_Col[private.point_no])-1);\n\t\t/* work */\n\t\t#pragma loop name kernel#16 \n\t\t#pragma cetus parallel \n\t\tfor (ei_new=0; ei_new #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/AVI/avilib.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " are finished with writing\n\t*/\n\t#pragma loop name AVI_open_output_file#0 \n\t#pragma cetus parallel \n\tfor (i=0; i<2048; i ++ )\n\t{\n\t\tAVI_header[i]=0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/AVI/avilib.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " audio index arrays */\n\tnvi=0;\n\t#pragma loop name avi_parse_input_file#3 \n\t#pragma cetus parallel \n\tfor (j=0; janum; ++ j)\n\t{\n\t\tnai[j]=0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/rodinia_3.1/openmp/heartwall/AVI/avilib.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "al_0;\n\t\t\t}\n\t\t\t;\n\t\t}\n\t}\n\tnvi=0;\n\t#pragma loop name avi_parse_input_file#7 
\n\t#pragma cetus parallel \n\tfor (j=0; janum; ++ j)\n\t{\n\t\tnai[j]=(tot[j]=0);\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for reduction(*: R23, T23)", "context_chars": 100, "text": ";\n\t\t#pragma loop name randlc#0 \n\t\t#pragma cetus reduction(*: R23, T23) \n\t\t#pragma cetus parallel \n\t\tfor (i=1; i<=23; i ++ )\n\t\t{\n\t\t\tR23=(0.5*R23);\n\t\t\tT23=(2.0*T23);\n\t\t} #pragma omp parallel for reduction(*: R23, T23)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for reduction(*: R46, T46)", "context_chars": 100, "text": "}\n\t\t#pragma loop name randlc#1 \n\t\t#pragma cetus reduction(*: R46, T46) \n\t\t#pragma cetus parallel \n\t\tfor (i=1; i<=46; i ++ )\n\t\t{\n\t\t\tR46=(0.5*R46);\n\t\t\tT46=(2.0*T46);\n\t\t} #pragma omp parallel for reduction(*: R46, T46)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for reduction(+: j)", "context_chars": 100, "text": "/\n\tj=0;\n\t#pragma loop name full_verify#1 \n\t#pragma cetus reduction(+: j) \n\t#pragma cetus parallel \n\tfor (i=1; i<(1<<23); i ++ )\n\t{\n\t\tif (key_array[i-1]>key_array[i])\n\t\t{\n\t\t\tj ++ ;\n\t\t}\n\t} #pragma omp parallel for reduction(+: j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "dex_array[i]];\n\t}\n\t/* Clear the work array */\n\t#pragma loop name rank#1 \n\t#pragma cetus parallel \n\tfor (i=0; i<(1<<19); i ++ )\n\t{\n\t\tkey_buff1[i]=0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/Cetus/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i=0; i<(1<<19); i ++ )\n\t{\n\t\tkey_buff1[i]=0;\n\t}\n\t#pragma loop name rank#2 \n\t#pragma cetus parallel \n\tfor (i=0; i<(1<<19); i ++ )\n\t{\n\t\tprv_buff1[i]=0;\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_gpu_target/dataracebench/DRB031-truedepfirstdimension-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "[1000][1000];\n\n #pragma omp target data map(from:b[0:1000][0:1000])\n {\n #pragma omp target\n {\n for (i=0; i #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "uble B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n if (n >= 1) {\n for (c1 = 0; c1 <= n + -1; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= n + -1; c2++) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "{\n #pragma omp parallel for private(c1 ,c2 ) \n for (c1 = 0; c1 <= n + -1; c1++) {\n for (c2 = 0; c2 <= n + -1; c2++) {\n X[c1][c2] = (((double )c1) * (c2 + 1) + 1) / n;\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c8 ) ", "context_chars": 100, "text": ";\n \n //#pragma scop\n{\n int c0;\n int c2;\n int c8;\n for (c0 = 0; c0 <= 9; c0++) {\n for (c2 = 0; c2 <= 499; c2++) {\n for (c8 = 1; c8 <= 499; c8++) {\n B[c2][c8] = B[c2][c8] - A[c2][c8] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c2][c8] = X[c2][c8] - X[c2][c8 - 1] * A[c2][c8] / B[c2][c8 - 1];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[c2][500 - c8 - 2] = (X[c2][500 - 2 - c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n } #pragma omp parallel for private(c2 ,c8 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "c8] - X[c2][500 - 2 - c8 - 1] * A[c2][500 - c8 - 3]) / B[c2][500 - 3 - c8];\n }\n }\n for (c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c8 ) ", "context_chars": 100, "text": "(c2 = 0; c2 <= 499; c2++) {\n X[c2][500 - 1] = X[c2][500 - 1] / B[c2][500 - 1];\n }\n for (c2 = 0; c2 <= 499; c2++) {\n for (c8 = 1; c8 <= 499; c8++) {\n B[c8][c2] = B[c8][c2] - A[c8][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 1; c8 <= 499; c8++) {\n X[c8][c2] = X[c8][c2] - X[c8 - 1][c2] * A[c8][c2] / B[c8 - 1][c2];\n }\n for (c8 = 0; c8 <= 497; c8++) {\n X[500 - 2 - c8][c2] = (X[500 - 2 - c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n } #pragma omp parallel for private(c2 ,c8 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB043-adi-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "c8][c2] - X[500 - c8 - 3][c2] * A[500 - 3 - c8][c2]) / B[500 - 2 - c8][c2];\n }\n }\n for (c2 = 0; c2 <= 499; c2++) {\n X[500 - 1][c2] = X[500 - 1][c2] / B[500 - 1][c2];\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "int argc, char* argv[])\n{\n int i,j;\n float temp, sum=0.0;\n int len=100;\n\n float u[100][100];\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(j ) \n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n #pragma omp parallel for private(temp ,j ) 
reduction(+:sum) \n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": ";\n\n float u[100][100];\n\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(temp ,j ) reduction(+:sum) ", "context_chars": 100, "text": "\n #pragma omp parallel for private(j ) \n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(temp ,j ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(temp ,j ) reduction(+:sum) ", "context_chars": 100, "text": "5; \n\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(temp ,j ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " outLen = 0;\n\n if (argc>1)\n inLen= atoi(argv[1]);\n\n int input[inLen]; \n int output[inLen];\n\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "n=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int numNodes=len, numNodes2=0; \n int x[len]; \n\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:numNodes2) ", "context_chars": 100, "text": "ate(i ) \n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\n for (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2+=-1;\n }\n } #pragma omp parallel for private(i ) reduction(+:numNodes2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB022-reductionmissing-var-yes.c", 
"omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": " float temp, sum=0.0;\n int len=100;\n if (argc>1)\n len = atoi(argv[1]);\n float u[len][len];\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(j ) \n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n #pragma omp parallel for private(i ,temp ,j ) reduction(+:sum) \n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": ");\n float u[len][len];\n\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n #pragma omp parallel for private(i ,temp ,j ) reduction(+:sum) \n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,temp ,j ) reduction(+:sum) ", "context_chars": 100, "text": ")\n #pragma omp parallel for private(j ) \n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\n for (i = 0; i < len; i++)\n #pragma omp parallel for private(temp ,j ) reduction(+:sum) \n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(i ,temp ,j ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(temp ,j ) reduction(+:sum) ", "context_chars": 100, "text": "\n\n #pragma omp parallel for private(i ,temp ,j ) reduction(+:sum) \n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private(temp ,j ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c3 ,c4 ,c2 ) ", "context_chars": 100, "text": ")\n{\n //int i;\n //int j;\n{\n int c1;\n int c2;\n int c4;\n int c3;\n if (n >= 1) {\n for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n #pragma omp parallel for private(c2 ,c3 ,c4 ) \n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t #pragma omp parallel for private(c3 ,c4 ) \n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c1 ,c3 ,c4 ,c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c3 ,c4 ) ", "context_chars": 100, "text": " 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t #pragma omp parallel for private(c3 ,c4 ) \n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n }\n } #pragma omp parallel for private(c2 ,c3 ,c4 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c3 ,c4 ) ", "context_chars": 100, "text": "0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c3 ,c4 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c4 ) ", "context_chars": 100, "text": ") \n \t for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {\n\t for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {\n A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;\n B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;\n } #pragma omp parallel for private(c4 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB056-jacobi2d-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c5 ,c4 ,c2 ,c3 ) ", "context_chars": 100, "text": "/ 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {\n for (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 
16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {\n for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {\n if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {\n if ((n + 1) % 2 == 0) {\n for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {\n A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];\n }\n }\n }\n if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {\n if ((n 
+ 1) % 2 == 0) {\n for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {\n A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];\n }\n }\n }\n for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {\n if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);\n }\n }\n for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {\n if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {\n B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);\n }\n for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {\n B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);\n A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];\n }\n if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];\n }\n }\n if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {\n for (c5 = 
(16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {\n A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];\n }\n }\n }\n if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {\n for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {\n B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);\n }\n }\n if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {\n for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {\n B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);\n }\n }\n }\n } #pragma omp parallel for private(c1 ,c5 ,c4 ,c2 ,c3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "t main(int argc,char *argv[])\n{\n int i;\n int j;\n double a[20][20];\n memset(a,0,(sizeof(a)));\n\n for (i = 0; i < 20; i++)\n #pragma omp parallel for private(j ) \n for (j = 0; j < 20; j++)\n a[i][j] = i * 20 + j;\n\n for (i = 0; i < 20 -1; i += 1) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < 20; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "memset(a,0,(sizeof(a)));\n\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < 20; i++)\n for (j = 0; j < 20; j++)\n a[i][j] = i * 20 + j;\n\n for (i = 0; i < 20 -1; i += 1) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < 20; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB053-inneronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": ") \n for (j = 0; j < 20; j++)\n a[i][j] = i * 20 + j;\n\n for (i = 0; i < 20 -1; i += 1) {\n for (j = 0; j < 20; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB108-atomic-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:a) ", "context_chars": 100, "text": "*\n * Test if 
atomic can be recognized properly. No data races.\n * */\nint main (void)\n{\n int a=0;\n for (int i = 0; i < 100; i++)\n {\n a += 1;\n } #pragma omp parallel for reduction(+:a) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB112-linear-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "\n*/\n#include \nint main()\n{\n int len=100;\n double a[len], b[len], c[len];\n int i,j=0;\n\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB112-linear-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) linear(j ) ", "context_chars": 100, "text": "en;i++)\n {\n a[i]=((double)i)/2.0; \n b[i]=((double)i)/3.0; \n c[i]=((double)i)/7.0; \n }\n\n for (i=0;i #pragma omp parallel for private(i ) linear(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB016-outputdep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) lastprivate(i ) ", "context_chars": 100, "text": "x@74:5 vs. x@74:5\n*/\n\n#include \nint a[100];\n\nint main()\n{\n int len=100; \n int i,x=10;\n\n for (i=0;i #pragma omp parallel for firstprivate(i ) lastprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB109-orderedmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:x) ", "context_chars": 100, "text": " * Missing the ordered clause\n * Data race pair: x@56:5 vs. x@56:5\n * */\nint main()\n{\n int x =0;\n for (int i = 0; i < 100; ++i) {\n x++;\n } #pragma omp parallel for reduction(+:x) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB101-task-value-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "100\nint a[MYLEN];\nint b[MYLEN];\n\nvoid gen_task(int i)\n{\n a[i]= i+1;\n}\n\nint main()\n{\n int i=0;\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB101-task-value-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " private(i ) \n for (i=0; ifor (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB110-ordered-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": "niv.\n * Proper user of ordered directive and clause, no data races\n * */\nint main()\n{\n int x =0;\n for (int i = 0; i < 100; ++i) {\n #pragma omp ordered\n x++;\n } #pragma omp parallel for ordered"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "parallelized so no race condition.\n*/\ndouble b[100][100];\n#define N 100\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < N; j++) {\n b[i][j] = i * j;\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n b[i][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB064-outeronly2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "j < N; j++) {\n b[i][j] = i * j;\n }\n }\n return 0;\n}\n\nvoid foo(int n, int m)\n{\n int i,j;\n for (i=0;i #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB017-outputdep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(len ,a ,i ) lastprivate(i ) ", "context_chars": 100, "text": "* argv[])\n{\n int len=100; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len];\n int i,x=10;\n\n for (i=0;i #pragma omp parallel for firstprivate(len ,a ,i ) lastprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB059-lastprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) lastprivate(x ) ", "context_chars": 100, "text": " back to the shared one within the last iteration.\n*/\n#include \n\nvoid foo()\n{\n int i,x;\n for (i=0;i<100;i++)\n x=i;\n printf(\"x=%d\",x);\n}\n\nint main()\n{\n foo();\n return 0;\n} #pragma omp parallel for private(i ) lastprivate(x ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c4 ,c2 ,c3 ) ", "context_chars": 100, "text": ")\n{\n //int i;\n //int j;\n{\n int c1;\n int c3;\n int c2;\n int c4;\n if (n >= 1) {\n for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n #pragma omp parallel for private(c2 ,c4 ,c3 ) \n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t #pragma omp parallel for private(c3 ,c4 ) \n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n }\n }\n } #pragma omp parallel for private(c1 ,c4 ,c2 ,c3 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c4 ,c3 ) ", "context_chars": 100, "text": " 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t #pragma omp parallel for private(c3 ,c4 ) \n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n }\n } #pragma omp parallel for private(c2 ,c4 ,c3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c3 ,c4 ) ", "context_chars": 100, "text": "0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n\t #pragma omp parallel for private(c4 ) \n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c3 ,c4 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c4 ) ", "context_chars": 100, "text": " ) \n \t for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {\n\t for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {\n X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;\n A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;\n B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;\n } #pragma omp parallel for private(c4 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c15 ,c9 ,c8 ) ", "context_chars": 100, "text": "= 1 && tsteps >= 1) {\n for (c0 = 0; c0 <= tsteps + -1; c0++) {\n if (n >= 2) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][c9] = X[c15][c9] - X[c15][c9 
- 1] * A[c15][c9] / B[c15][c9 - 1];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];\n }\n }\n }\n } #pragma omp parallel for private(c2 ,c15 ,c9 ,c8 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c15 ) ", "context_chars": 100, "text": "/ B[c15][n - 3 - c9];\n }\n }\n }\n }\n }\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for \n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n } #pragma omp parallel for private(c2 ,c15 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c15 ,c9 ,c8 ) ", "context_chars": 100, "text": "[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];\n }\n }\n if (n >= 2) {\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {\n for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];\n }\n }\n }\n for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {\n for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];\n }\n }\n }\n } #pragma omp parallel for private(c2 ,c15 ,c9 ,c8 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c15 ) ", "context_chars": 100, "text": "/ B[n - 2 - c9][c15];\n }\n }\n }\n }\n }\n for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n #pragma omp parallel for \n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];\n }\n } #pragma omp parallel for private(c2 ,c15 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB044-adi-tile-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {\n for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {\n X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB067-restrictpointer1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "double real8;\n\nvoid foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)\n{\n int i;\n\n for (i = 0; i <= length - 1; i += 1) {\n newSxx[i] = 0.0;\n newSyy[i] = 0.0;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB024-simdtruedep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "clude \nint main(int argc, char* argv[])\n{\n int i;\n int len=100;\n int a[100], b[100];\n\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum0) ", "context_chars": 100, "text": "o.h>\n#include \n\nint sum0=0, sum1=0;\n\nint main()\n{\n int len=1000;\n int i, sum=0;\n {\n for (i=0;i #pragma omp parallel for private(i ) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB091-threadprivate2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum1) ", "context_chars": 100, "text": ";ifor (i=0;i #pragma omp parallel for private(i ) reduction(+:sum1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "nt input[1000]; \nint output[1000];\n\nint main()\n{\n int i ;\n int inLen=1000 ; \n int outLen = 0;\n\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB097-target-teams-distribute-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "n = 2560;\n double sum =0.0, sum2=0.0;\n double a[len], b[len];\n /*Initialize with some values*/\n for (i=0; i #pragma omp parallel for 
private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB084-threadprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum0) ", "context_chars": 100, "text": "pragma omp threadprivate(sum0)\n\nvoid foo (int i)\n{\n sum0=sum0+i;\n}\n\nint main()\n{\n int i, sum=0;\n for (i=1;i<=1000;i++)\n {\n foo (i);\n } #pragma omp parallel for private(i ) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB084-threadprivatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum1) ", "context_chars": 100, "text": ") \n for (i=1;i<=1000;i++)\n {\n foo (i);\n } \n sum=sum+sum0;\n\n/* reference calculation */\n for (i=1;i<=1000;i++)\n {\n sum1=sum1+i;\n } #pragma omp parallel for private(i ) reduction(+:sum1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "ain(int argc, char* argv[])\n{\n int i;\n int ret;\n FILE* pfile;\n int len=1000;\n\n int A[1000];\n\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " map + array sections derived from pointers\n*/\nvoid foo (double* a, double* b, int N)\n{\n int i; \n for (i=0;i< N ;i++)\n b[i]=a[i]*(double)i;\n}\n\nint main(int argc, char* argv[])\n{\n int i;\n int len = 1000;\n double a[len], b[len];\n #pragma omp parallel for private(i ) \n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB099-targetparallelfor2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "uble)i;\n}\n\nint main(int argc, char* argv[])\n{\n int i;\n int len = 1000;\n double a[len], b[len];\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB066-pointernoaliasing-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "* ) malloc (sizeof (double) * N );\n double * m_nvol = (double* ) malloc (sizeof (double) * N );\n\n for (int i=0; i < N; ++i ) \n { \n m_pdv_sum[ i ] = 0.0;\n m_nvol[ i ] = i*2.5;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum0) ", "context_chars": 100, "text": "sert.h>\nint sum0=0, sum1=0;\n//#pragma omp threadprivate(sum0)\n\nint main()\n{\n int i, sum=0;\n {\n for (i=1;i<=1000;i++)\n {\n sum0=sum0+i;\n } #pragma omp parallel for private(i ) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB092-threadprivatemissing2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum1) ", 
"context_chars": 100, "text": "=1000;i++)\n {\n sum0=sum0+i;\n }\n } \n sum= sum+sum0;\n/* reference calculation */\n for (i=1;i<=1000;i++)\n {\n sum1=sum1+i;\n } #pragma omp parallel for private(i ) reduction(+:sum1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "l loop parallelization\n*/\n#define N 100\n\ndouble a[N][N],v[N],v_out[N];\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < N; j++) {\n a[i][j] = i * j;\n }\n v_out[i] = i * j;\n v[i] = i * j;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n a[i][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": " }\n v_out[i] = i * j;\n v[i] = i * j;\n }\n return 0;\n}\n\nint mv()\n{ \n int i,j;\n for (i = 0; i < N; i++)\n { \n float sum = 0.0;\n #pragma omp parallel for private(j ) reduction(+:sum) \n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } \n v_out[i] = sum;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB061-matrixvector1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) reduction(+:sum) ", "context_chars": 100, "text": "ma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++)\n { \n float sum = 0.0;\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } #pragma omp parallel for private(j ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "uble B[500 + 0][500 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (n >= 1) {\n for (c1 = 0; c1 <= n + -1; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= n + -1; c2++) {\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "{\n #pragma omp parallel for private(c1 ,c2 ) \n for (c1 = 0; c1 <= n + -1; c1++) {\n for (c2 = 0; c2 <= n + -1; c2++) {\n A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;\n B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": 
"#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);\n }\n }\n }\n for (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {\n B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);\n for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {\n A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];\n B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);\n }\n A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " + 3 * c1 + -1][498];\n }\n if (c0 >= 499) {\n if ((2 * c0 + 1) % 3 == 0) {\n for (c2 = ((2 * c0 + -992) * 3 < 0?-(-(2 * c0 + -992) / 3) : ((3 < 0?(-(2 * c0 + -992) + - 3 - 1) / - 3 : (2 * c0 + -992 + 3 - 1) / 3))); c2 <= (((2 * c0 + 499) * 3 < 0?((3 < 0?-((-(2 * c0 + 499) + 3 + 1) / 3) : -((-(2 * c0 + 499) + 3 - 1) / 3))) : (2 * c0 + 499) / 3)); c2++) {\n A[498][(-2 * c0 + 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB055-jacobi2d-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];\n }\n }\n }\n }\n for (c2 = 20; c2 <= 517; c2++) {\n A[498][c2 + -19] = B[498][c2 + -19];\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,x ) reduction(+:pi) ", "context_chars": 100, "text": "le pi = 0.0;\n long int i;\n double x, interval_width;\n interval_width = 1.0/(double)num_steps;\n\n for (i = 0; i < num_steps; i++) {\n x = (i+ 0.5) * interval_width;\n pi += 1.0 / (x*x + 1.0);\n } #pragma omp parallel for private(i ,x ) reduction(+:pi) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "rgv[])\n{\n int i;\n int tmp;\n int len=100;\n if (argc>1)\n len = atoi(argv[1]);\n int a[len];\n\n for (i=0;i #pragma omp 
parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(tmp ) ", "context_chars": 100, "text": "gv[1]);\n int a[len];\n\n #pragma omp parallel for private(i ) \n for (i=0;ifor (i=0;i #pragma omp parallel for private(tmp ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "c(). Aborting ...\\n\");\n return 1; \n }\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n xa1[i]=0.5*i;\n xa2[i]=1.5*i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "allel for private(i ) \n for (i =521; i<= 2025; ++i)\n {\n xa1[i]=0.5*i;\n xa2[i]=1.5*i;\n }\n\n for (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB050-functionparameter-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c ,o1 ,i ,len ) ", "context_chars": 100, "text": "Arrays passed as function parameters\n*/\nvoid foo1(double o1[], double c[], int len)\n{ \n int i ;\n\n for (i = 0; i < len; ++i) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for firstprivate(c ,o1 ,i ,len ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB050-functionparameter-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "i] = volnew_o8;\n } \n}\n\nint main()\n{\n double o1[101];\n double c[101];\n int i;\n int len = 100;\n for (i = 0; i < len; ++i) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "Arrays passed as function parameters\n*/\nvoid foo1(double o1[], double c[], int len)\n{ \n int i ;\n\n for (i = 0; i < len; ++i) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "o1[i] = volnew_o8;\n } \n}\n\ndouble o1[100];\ndouble c[100];\n\nint main()\n{\n int i;\n int len = 100;\n for (i = 0; i < len; ++i) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "vate. 
\n*/\nint main()\n{\n int len=100;\n double a[len][len], b[len][len], c[len][len];\n int i,j;\n\n for (i=0;i #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB098-simd2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "len], c[len][len];\n int i,j;\n\n #pragma omp parallel for private(i ,j ) \n for (i=0;ifor (j=0;j #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "lt(shared) to cover another option.\n*/\n\nint a[100][100];\nint b[100][100];\nint main()\n{\n int i,j;\n for (i=0;i<100;i++) {\n #pragma omp parallel for private(j ) \n for (j=0;j<100;j++) {\n a[i][j] = i;\n b[i][j] = i;\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB113-default-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "00];\nint main()\n{\n int i,j;\n #pragma omp parallel for private(i ,j ) \n for (i=0;i<100;i++) {\n for (j=0;j<100;j++) {\n a[i][j] = i;\n b[i][j] = i;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB100-task-reference-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " MYLEN 100\nint a[MYLEN];\n\nvoid gen_task(int i)\n{\n a[i]= i+1;\n}\n\nint main()\n{\n int i=0;\n {\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,xx ,yy ) ", "context_chars": 100, "text": " xx, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n/* Initialize initial condition and RHS */\n for (i = 0; i < n; i++)\n #pragma omp parallel for private(j ,xx ,yy ) \n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i ,j ,xx ,yy ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ,xx ,yy ) ", "context_chars": 100, "text": "ndition and RHS */\n #pragma omp parallel for private(i ,j ,xx ,yy ) \n for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(j ,xx ,yy ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 
100, "text": " k = 1;\n\n while (k <= mits)\n {\n error = 0.0;\n\n /* Copy new solution into old */\n for (i = 0; i < n; i++)\n #pragma omp parallel for \n for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n #pragma omp parallel for private(j ,resid ) reduction(+:error) \n for (i = 1; i < (n - 1); i++)\n \t #pragma omp parallel for private(resid ) reduction(+:error) \n \t for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "into old */\n #pragma omp parallel for private(j ) \n for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n #pragma omp parallel for private(j ,resid ) reduction(+:error) \n for (i = 1; i < (n - 1); i++)\n \t #pragma omp parallel for private(resid ) reduction(+:error) \n \t for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ,resid ) reduction(+:error) ", "context_chars": 100, "text": "agma omp parallel for \n for (j = 0; j < m; j++)\n uold[i][j] = u[i][j];\n\n for (i = 1; i < (n - 1); i++)\n \t #pragma omp parallel for private(resid ) reduction(+:error) \n \t for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private(j ,resid ) reduction(+:error) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB058-jacobikernel-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(resid ) reduction(+:error) ", "context_chars": 100, "text": " parallel for private(j ,resid ) reduction(+:error) \n for (i = 1; i < (n - 1); i++)\n \t for (j = 1; j < (m - 1); j++)\n {\n resid = (ax * (uold[i - 1][j] + uold[i + 1][j])\n + ay * (uold[i][j - 1] + uold[i][j + 1]) +\n b * uold[i][j] - f[i][j]) / b;\n\n u[i][j] = uold[i][j] - omega * resid;\n error = error + resid * resid;\n } #pragma omp parallel for private(resid ) reduction(+:error) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "ay computation with a vetorization directive\n*/\nint a[100], b[100], c[100];\nint main()\n{\n int i;\n for (i=0;i<100;i++) {\n a[i]= i * 40;\n b[i] = i - 1;\n c[i] = i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB070-simd1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", 
"context_chars": 100, "text": "el for private(i ) \n for (i=0;i<100;i++) {\n a[i]= i * 40;\n b[i] = i - 1;\n c[i] = i;\n }\n\n for (i=0;i<100;i++)\n a[i]=b[i]*c[i];\n\n for (i=0;i<100;i++) {\n printf(\"%d %d %d\\n\", a[i], b[i], c[i]);\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "clude\n\nint main(int argc, char* argv[])\n{\n int i;\n int len=100;\n int a[len], b[len];\n\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,tmp ) ", "context_chars": 100, "text": "\n\t a[i]=i;\n\t b[i]=i;\n } \n /* static storage for a local variable */\n {\n static int tmp;\n for (i=0;i #pragma omp parallel for private(i ,tmp ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB090-static-local-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,tmp ) ", "context_chars": 100, "text": "]+i;\n a[i] = tmp;\n }\n }\n\n/* automatic storage for a local variable */\n {\n int tmp;\n for (i=0;i #pragma omp parallel for private(i ,tmp ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "level parallelization.\n*/\n#define N 1000\ndouble a[N][N],v[N],v_out[N];\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < N; j++) {\n a[i][j] = i * j;\n }\n v_out[i] = i * j;\n v[i] = i * j;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n a[i][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": " }\n v_out[i] = i * j;\n v[i] = i * j;\n }\n return 0;\n}\n\nvoid mv()\n{ \n int i,j;\n for (i = 0; i < N; i++)\n { \n float sum = 0.0;\n #pragma omp parallel for private(j ) reduction(+:sum) \n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } \n v_out[i] = sum;\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB062-matrixvector2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) reduction(+:sum) ", "context_chars": 100, "text": "ma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++)\n { \n float sum = 0.0;\n for (j = 0; j < N; j++)\n { \n sum += a[i][j]*v[j];\n } #pragma omp parallel for private(j ) reduction(+:sum) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " j@67:7 vs. j@68:5 \n*/\nint main()\n{\n int len=100;\n double a[len], b[len], c[len];\n int i,j=0;\n\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) linear(j ) ", "context_chars": 100, "text": "en;i++)\n {\n a[i]=((double)i)/2.0; \n b[i]=((double)i)/3.0; \n c[i]=((double)i)/7.0; \n }\n\n for (i=0;i #pragma omp parallel for private(i ) linear(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "de \nint main(int argc, char* argv[])\n{\n int i;\n int tmp;\n int len=100;\n int a[100];\n\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,tmp ) ", "context_chars": 100, "text": "en=100;\n int a[100];\n\n #pragma omp parallel for private(i ) \n for (i=0;ifor (i=0;i #pragma omp parallel for private(i ,tmp ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "nclude \n\n/*\nExample use of firstprivate()\n*/\nvoid foo(int * a, int n, int g)\n{\n int i;\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " for (i=0;ifor (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "rallelized. 
\n \n*/\nint n=100, m=100;\ndouble b[100][100];\n#define N 100\n\nint init()\n{\n int i,j,k;\n for (i = 0; i < N; i++) {\n #pragma omp parallel for private(j ) \n for (j = 0; j < N; j++) {\n b[i][j] = i * j;\n }\n } #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "nt init()\n{\n int i,j,k;\n #pragma omp parallel for private(i ,j ) \n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n b[i][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB063-outeronly1-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "for (j = 0; j < N; j++) {\n b[i][j] = i * j;\n }\n }\n return 0;\n}\n\nvoid foo()\n{\n int i,j;\n for (i=0;i #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "ble D[128 + 0][128 + 0])\n{\n //int i;\n //int j;\n{\n int c2;\n int c1;\n if (nl >= 1) {\n for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel 
for private(c2 ) \n for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "(double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": 
"#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "nl)) : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((((nj + -1 < 
nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\n for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double 
)c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "m?nj : nm); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\n for (c1 = nm; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n \tfor (c2 = 
0; c2 <= nm + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "r (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\n for (c1 = 0; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "j + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for 
private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "r (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\n for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n\t#pragma omp parallel for private(c2 ) \n\tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n\tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "l?nj : nl); c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\n for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj >= 1) {\n for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", 
"omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": ",c2 ) \n for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\n for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "r (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj <= 0 && nl <= 0) {\n for (c1 = 0; c1 <= 
((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "k + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nj <= 0) {\n for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " ) \n for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {\n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nk + -1 < 
nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "+ -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) {\n for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "c2 ) \n for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "r (c2 = nm; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm <= 0) {\n for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp 
parallel for private(c2 ) ", "context_chars": 100, "text": "or private(c1 ,c2 ) \n for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "2 ) \n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "] = ((double )c1) * c2 / ni;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "r (c2 = nl; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n for (c2 = nk; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " \n for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {\n for (c2 = 
0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nk >= 1) {\n for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " ) \n for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {\n \tfor (c2 = 0; c2 <= nk + -1; c2++) {\n A[c1][c2] = ((double )c1) * c2 / ni;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": ") {\n A[c1][c2] = ((double )c1) * c2 / ni;\n }\n }\n }\n if (nl >= 1) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "+ -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / 
nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nm); c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " nl); c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nl <= 0) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + 
-1 : nk + -1)) : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "= nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nm >= 1) {\n for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "2 ) \n for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n C[c1][c2] = 
((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nm <= 0) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": ",c2 ) \n for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1 && nl >= 1) {\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nj; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "2 ) \n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 1) / nj;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "= nl; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n for (c2 = nj; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } 
#pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nj >= 1 && nl <= 0) {\n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "c2 ) \n for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": " B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nj >= 1) {\n for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": ") \n for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {\n \tfor (c2 = 0; c2 <= nj + -1; c2++) {\n B[c1][c2] = ((double )c1) * (c2 + 1) / nj;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 1) / nj;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " ) \n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = 
((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "= nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl <= 0) {\n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " \n for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk >= 1 && nm >= 1) {\n for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "\n for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= ((nl 
+ -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n #pragma omp parallel for private(c2 ) \n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "private(c1 ,c2 ) \n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "double )c1) * (c2 + 3) / nl;\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n for (c2 = nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "= nl; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n for (c2 = nm; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl <= 0) {\n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " private(c1 ,c2 ) \n for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nk <= 0 && nm >= 1) {\n for (c1 = nm; c1 <= nj + -1; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " #pragma omp parallel for private(c1 ,c2 ) \n for (c1 = nm; c1 <= nj + -1; c1++) {\n for (c2 = 0; c2 <= nm + -1; c2++) {\n C[c1][c2] = ((double )c1) * (c2 + 3) / nl;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 3) / nl;\n }\n }\n }\n if (nj <= 0 && nl >= 1) {\n for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "2 ) \n for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {\n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk >= 1 && nl >= 1) {\n for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {\n for (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n }\n }\n if (nk <= 0 && nl >= 1) {\n for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {\n \t#pragma omp parallel for private(c2 ) \n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": "a omp parallel for private(c1 ,c2 ) \n for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {\n \tfor (c2 = 0; c2 <= nl + -1; c2++) {\n D[c1][c2] = ((double )c1) * (c2 + 2) / nk;\n } #pragma omp parallel 
for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "])\n{\n //int i;\n //int j;\n //int k;\n \n //#pragma scop\n{\n int c1;\n int c2;\n int c5;\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " int c5;\n #pragma omp parallel for private(c1 ,c2 ) \n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n } #pragma omp parallel for private(c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c5 ,c2 ) ", "context_chars": 100, "text": " for (c2 = 0; c2 <= 127; c2++) {\n G[c1][c2] = 0;\n F[c1][c2] = 0;\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2 ,c5 ) \n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n } #pragma omp parallel for private(c1 ,c5 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ,c5 ) ", "context_chars": 100, "text": "\n }\n #pragma omp parallel for private(c1 ,c5 ,c2 ) \n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n } #pragma omp parallel for private(c2 ,c5 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5 ) ", "context_chars": 100, "text": "+) {\n #pragma omp parallel for private(c2 ,c5 ) \n for (c2 = 0; c2 <= 127; c2++) {\n for (c5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n } #pragma omp parallel for private(c5 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c2 ) ", "context_chars": 100, "text": "5 = 0; c5 <= 127; c5++) {\n F[c1][c2] += C[c1][c5] * D[c5][c2];\n }\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c2 ) \n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n } #pragma omp parallel for private(c1 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c2 ) ", "context_chars": 100, "text": " }\n }\n #pragma omp parallel for private(c1 ,c2 ) \n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n } #pragma omp parallel for private(c2 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c1 ,c5 ,c2 ) ", "context_chars": 100, "text": "el for private(c2 ) \n for (c2 = 0; c2 <= 127; c2++) {\n E[c1][c2] = 0;\n }\n }\n for (c1 = 0; c1 <= 127; c1++) {\n #pragma omp parallel for private(c5 ,c2 ) \n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n }\n }\n } #pragma omp parallel for private(c1 ,c5 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5 ,c2 ) ", "context_chars": 100, "text": "\n }\n #pragma omp parallel for private(c1 ,c5 ,c2 ) \n for (c1 = 0; c1 <= 127; c1++) {\n for (c2 = 0; c2 <= 127; c2++) {\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n #pragma omp parallel for private(c5 ) \n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n }\n } #pragma omp parallel for private(c5 ,c2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5 ) ", "context_chars": 100, "text": "+) {\n #pragma omp parallel for private(c5 ,c2 ) \n for (c2 = 0; c2 <= 127; c2++) {\n for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n } #pragma omp parallel for private(c5 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB041-3mm-parallel-no.c", "omp_pragma_line": "#pragma omp parallel for private(c5 ) ", "context_chars": 100, "text": " for (c5 = 0; c5 <= 127; c5++) {\n E[c1][c2] += A[c1][c5] * B[c5][c2];\n }\n for (c5 = 0; c5 <= 127; c5++) {\n G[c1][c5] += E[c1][c2] * F[c2][c5];\n } #pragma omp parallel for private(c5 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB013-nowait-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "]@75:13.\n*/\n\n#include \nint main()\n{\n int i,error;\n int len = 1000;\n int a[len], b=5;\n\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "e \nint main(int argc,char *argv[])\n{\n int i, j;\n int len = 20; \n\n double a[20][20];\n\n for (i=0; i< len; i++)\n #pragma omp parallel for private(j ) \n for (j=0; j #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": " 20; \n\n double a[20][20];\n\n #pragma omp parallel for private(i ,j ) \n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for private(j ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "r (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "* xa1 = base;\n double * xa2 = xa1 + 2014;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "]=0.5*i;\n }\n// default static even scheduling may not trigger data race, using static,1 instead.\n for (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0 + i;\n xa2[idx]+= 3.0 + i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": "ine M 100 \n#define K 100\ndouble a[N][M],b[M][K],c[N][K];\n\nint init() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n #pragma omp parallel for private(j ,k ) \n for (k = 0; k < K; k++) \n #pragma omp parallel for private(j ) \n for (j = 0; j < M; j++) {\n c[i][j] = i * j;\n a[i][k] = i * j;\n b[k][j] = i * j;\n } #pragma omp parallel for private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ) ", "context_chars": 100, "text": " \n int i,j,k;\n #pragma omp parallel for private(i ,j ,k ) \n for (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n #pragma omp parallel for private(j ) \n for (j = 0; j < M; j++) {\n c[i][j] = i * j;\n a[i][k] = i * j;\n b[k][j] = i * j;\n } #pragma omp parallel for private(j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "i = 0; i < N; i++) \n #pragma omp parallel for private(j ,k ) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++) {\n c[i][j] = i * j;\n a[i][k] = i * j;\n b[k][j] = i * j;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": " b[k][j] = i * j;\n }\n return 0; \n} \n \nint mmm() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n #pragma omp parallel for private(j ,k ) \n for (k = 0; k < K; k++) \n #pragma omp parallel for private(j ) \n for (j = 0; j < M; j++)\n c[i][j]= c[i][j]+a[i][k]*b[k][j];\n return 0; \n} \n\nint print() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++)\n 
printf(\"%lf %lf %lf\\n\", c[i][j],a[i][k],b[k][j]);\n return 0; \n} #pragma omp parallel for private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ) ", "context_chars": 100, "text": " \n int i,j,k;\n #pragma omp parallel for private(i ,j ,k ) \n for (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n #pragma omp parallel for private(j ) \n for (j = 0; j < M; j++)\n c[i][j]= c[i][j]+a[i][k]*b[k][j];\n return 0; \n} \n\nint print() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++)\n printf(\"%lf %lf %lf\\n\", c[i][j],a[i][k],b[k][j]);\n return 0; \n} #pragma omp parallel for private(j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB060-matrixmultiply-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "i = 0; i < N; i++) \n #pragma omp parallel for private(j ,k ) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++)\n c[i][j]= c[i][j]+a[i][k]*b[k][j];\n return 0; \n} \n\nint print() \n{ \n int i,j,k;\n for (i = 0; i < N; i++) \n for (k = 0; k < K; k++) \n for (j = 0; j < M; j++)\n printf(\"%lf %lf %lf\\n\", c[i][j],a[i][k],b[k][j]);\n return 0; \n} #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB085-threadprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum0) ", "context_chars": 100, "text": "sum0=0, sum1=0;\nvoid foo (int i)\n{\n sum0=sum0+i;\n}\n\nint main()\n{\n int len=1000;\n int i, sum=0;\n for (i=0;i #pragma omp parallel for private(i ) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB085-threadprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(+:sum1) ", "context_chars": 100, "text": "0) \n for (i=0;ifor (i=0;i #pragma omp parallel for private(i ) reduction(+:sum1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "e * xa1 = base;\n double * xa2 = xa1 + 12;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "{\n int i;\n int len=100;\n\n int numNodes=len, numNodes2=10; \n int x[100]; \n\n // initialize x[]\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) reduction(-:numNodes2) ", "context_chars": 100, "text": "ate(i ) \n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\n for (i=numNodes-1 ; 
i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2--;\n }\n } #pragma omp parallel for private(i ) reduction(-:numNodes2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB025-simdtruedep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " argv[])\n{\n int i;\n int len=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len], b[len];\n for (i=0;i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " execution. \\n\");\n return 1; \n }\n\n double * xa1 = base;\n double * xa2 = base + 1;\n int i;\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.0;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB052-indirectaccesssharebase-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": ";\n #pragma omp parallel for private(i ) \n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.0;\n }\n\n for (i =0; i< N; ++i) // this level of loop has no loop carried dependence\n {\n int idx = indexSet[i];\n xa1[idx]+= 4.0;\n xa2[idx]+= 4.0;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "de \n\nvoid init(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++) {\n a[i] = 1;\n b[i] = i;\n c[i] = i * i; \n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB068-restrictpointer2-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": " * i; \n }\n}\n\nvoid foo(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++)\n a[i] = b[i] + c[i]; \n}\n\nvoid print(int n, int * restrict a, int * restrict b, int * restrict c)\n{\n int i;\n for (i = 0; i < n; i++) {\n printf(\"%d %d %d\\n\", a[i], b[i], c[i]);\n }\n} #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,xx ,yy ) ", "context_chars": 100, "text": "x, yy;\n\n dx = 2.0 / (n - 1);\n dy = 2.0 / (m - 1);\n\n /* Initialize initial condition and RHS */\n for (i = 0; i < n; i++)\n #pragma omp parallel for private(j ,xx ,yy ) \n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(i ,j ,xx ,yy ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB057-jacobiinitialize-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(j ,xx 
,yy ) ", "context_chars": 100, "text": "ndition and RHS */\n #pragma omp parallel for private(i ,j ,xx ,yy ) \n for (i = 0; i < n; i++)\n for (j = 0; j < m; j++)\n {\n xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */\n yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */\n u[i][j] = 0.0;\n f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)\n - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);\n\n } #pragma omp parallel for private(j ,xx ,yy ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ) ", "context_chars": 100, "text": "])\n{\n int i, j;\n int len = 20; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n double a[len][len];\n\n for (i=0; i< len; i++)\n #pragma omp parallel for private(j ) \n for (j=0; j #pragma omp parallel for private(i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "]);\n\n double a[len][len];\n\n #pragma omp parallel for private(i ,j ) \n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "vate(j ) \n for (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/DRB104-nowait-barrier-orig-no.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "e \n#include \nint main()\n{\n int i,error;\n int len = 1000;\n int a[len], b=5;\n\n for (i=0; i #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/utilities/polybench.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tmp) ", "context_chars": 100, "text": "flush = (double*) calloc (cs, sizeof(double));\n int i;\n double tmp = 0.0;\n#ifdef _OPENMP\n#endif\n for (i = 0; i < cs; i++)\n tmp += flush[i];\n assert (tmp <= 10.0);\n free (flush);\n}\n\n\n#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER\nvoid polybench_linux_fifo_scheduler()\n{\n /* Use FIFO scheduler to limit OS interference. Program must be run\n as root, and this works only for Linux kernels. */\n struct sched_param schedParam;\n schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);\n sched_setscheduler (0, SCHED_FIFO, &schedParam);\n} #pragma omp parallel for reduction(+:tmp) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,C ,i ) ", "context_chars": 100, "text": "nitialization. */\nstatic\nvoid init_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < n; i++)\n #pragma omp parallel for firstprivate(j ,C ,i ) \n for (j = 0; j < n; j++)\n C[i][j] = 42;\n}\n\n\n/* DCE code. Must scan the entire live-out data.\n Can be used also to check the correctness of the output. 
*/\nstatic\nvoid print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < n; i++)\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n }\n fprintf (stderr, \"\\n\");\n} #pragma omp parallel for firstprivate(j ,C ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,C ,i ) ", "context_chars": 100, "text": "n))\n{\n int i, j;\n\n #pragma omp parallel for firstprivate(j ,C ,i ) \n for (i = 0; i < n; i++)\n for (j = 0; j < n; j++)\n C[i][j] = 42;\n}\n\n\n/* DCE code. Must scan the entire live-out data.\n Can be used also to check the correctness of the output. */\nstatic\nvoid print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < n; i++)\n for (j = 0; j < n; j++) {\n\tfprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);\n\tif (i % 20 == 0) fprintf (stderr, \"\\n\");\n }\n fprintf (stderr, \"\\n\");\n} #pragma omp parallel for firstprivate(j ,C ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,C ,i ) ", "context_chars": 100, "text": "d return. */\nstatic\nvoid kernel_template(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))\n{\n int i, j;\n\n for (i = 0; i < _PB_N; i++)\n #pragma omp parallel for firstprivate(j ,C ,i ) \n for (j = 0; j < _PB_N; j++)\n C[i][j] += 42;\n\n}\n\n\nint main(int argc, char** argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);\n\n /* Initialize array(s). */\n init_array (n, POLYBENCH_ARRAY(C));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_template (n, POLYBENCH_ARRAY(C));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));\n\n /* Be clean. */\n POLYBENCH_FREE_ARRAY(C);\n\n return 0;\n} #pragma omp parallel for firstprivate(j ,C ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/dataracebench/utilities/template-for-new-benchmark.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,C ,i ) ", "context_chars": 100, "text": "{\n int i, j;\n\n #pragma omp parallel for firstprivate(j ,C ,i ) \n for (i = 0; i < _PB_N; i++)\n for (j = 0; j < _PB_N; j++)\n C[i][j] += 42;\n\n}\n\n\nint main(int argc, char** argv)\n{\n /* Retrieve problem size. */\n int n = N;\n\n /* Variable declaration/allocation. */\n POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);\n\n /* Initialize array(s). */\n init_array (n, POLYBENCH_ARRAY(C));\n\n /* Start timer. */\n polybench_start_instruments;\n\n /* Run kernel. */\n kernel_template (n, POLYBENCH_ARRAY(C));\n\n /* Stop and print timer. */\n polybench_stop_instruments;\n polybench_print_instruments;\n\n /* Prevent dead-code elimination. All live-out data must be printed\n by the function call in argument. */\n polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));\n\n /* Be clean. 
*/\n POLYBENCH_FREE_ARRAY(C);\n\n return 0;\n} #pragma omp parallel for firstprivate(j ,C ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/hotspot3D/3D.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(len ,arr2 ,arr1 ,i ) reduction(+:err) ", "context_chars": 100, "text": "iter);\n\n}\n\nfloat accuracy(float *arr1, float *arr2, int len)\n{\n float err = 0.0; \n int i;\n for(i = 0; i < len; i++)\n {\n err += (arr1[i]-arr2[i]) * (arr1[i]-arr2[i]);\n } #pragma omp parallel for firstprivate(len ,arr2 ,arr1 ,i ) reduction(+:err) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/hotspot3D/3D.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "rintf(\"%d threads running\\n\", omp_get_num_threads());\n\n do {\n int z; \n for (z = 0; z < nz; z++) {\n int y;\n for (y = 0; y < ny; y++) {\n int x;\n for (x = 0; x < nx; x++) {\n int c, w, e, n, s, b, t;\n c = x + y * nx + z * nx * ny;\n w = (x == 0) ? c : c - 1;\n e = (x == nx-1) ? c : c + 1;\n n = (y == 0) ? c : c - nx;\n s = (y == ny-1) ? c : c + nx;\n b = (z == 0) ? c : c - nx * ny;\n t = (z == nz-1) ? c : c + nx * ny;\n tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]\n + cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b] + ct * tIn_t[t]+(dt/Cap) * pIn[c] + ct*amb_temp;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "\t\t\t\t\t\t\t// updated row coordinates\n\t\tprivate[i].d_T = public.d_endoT;\t\t\t\t\t\t\t\t\t\t\t\t\t\t// templates\n\t}\n\n\tfor(i=public.endoPoints; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "lic.conv_cols;\t\t\t\t\t\t\t\t\t\t\t\t// number of elements\n\tpublic.conv_mem = sizeof(fp) * public.conv_elem;\n\n\tfor(i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "ublic.in2_pad_rows * public.in2_pad_cols;\n\tpublic.in2_pad_mem = sizeof(fp) * public.in2_pad_elem;\n\n\tfor(i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "elem = public.tMask_rows * public.tMask_cols;\n\tpublic.tMask_mem = sizeof(fp) * public.tMask_elem;\n\n\tfor(i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "if((public.mask_cols-1) % 2 > 0.5){\n\t\tpublic.mask_conv_joffset = public.mask_conv_joffset + 1;\n\t}\n\n\tfor(i=0; i #pragma omp parallel for 
firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "====================================================\n\n\t\tomp_set_num_threads(omp_num_threads);\n\t\t\n\n\t\tfor(i=0; i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "=================================================================================================\n\n\tfor(i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,m ,i ) ", "context_chars": 100, "text": "or u\nc-------------------------------------------------------------------*/\n\n int i, j, k, m;\n \n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for private(j ,k ,m ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for private(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for private(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,m ,i ) ", "context_chars": 100, "text": "\n \n #pragma omp parallel for private(j ,k ,m ,i ) \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for private(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for private(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for private(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,m ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for private(j ,k ,m ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for private(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t}\n } #pragma omp parallel for private(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,m ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for private(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];\n\t} #pragma omp parallel for private(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m ) ", "context_chars": 100, "text": "-------------------------------*/\n\n int i, j, k, m, d;\n double xi, eta, zeta, 
u_exact[5], add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for private(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(add ,m) firstprivate(k ,j ,i ) ", "context_chars": 100, "text": "0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\texact_solution(xi, eta, zeta, u_exact);\n\n\tfor (m = 0; m < 5; m++) {\n\t add = u[i][j][k][m] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add ,m) firstprivate(k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(d ,m ) ", "context_chars": 100, "text": "; m++) {\n\t add = u[i][j][k][m] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(m ) \n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for private(d ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "d;\n\t}\n }\n }\n }\n\n #pragma omp parallel for private(d ,m ) \n for (m = 0; m < 5; m++) {\n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m ) ", "context_chars": 100, "text": "----------------------------------------------------------*/\n\n int i, j, k, d, m;\n double add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for private(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(add, m) firstprivate(k ,j ,i ) ", "context_chars": 100, "text": "i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[i][j][k][m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for private(add, m) firstprivate(k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(d ,m ) ", "context_chars": 100, "text": " = 0; m < 5; m++) {\n\t add = rhs[i][j][k][m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(m ) \n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for private(d ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "d;\n\t}\n }\n }\n }\n\n #pragma omp parallel for private(d ,m ) \n for (m = 0; m < 5; m++) {\n for (d = 0; d <= 2; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for firstprivate(m ) "} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,m ,i ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for private(j, k, m) firstprivate(i ) \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for private(k, m) firstprivate(j ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for private(j, k, i) firstprivate(m ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for private(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j, k, m) firstprivate(i ) ", "context_chars": 100, "text": "--*/\n #pragma omp parallel for private(j ,k ,m ,i ) \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for private(k, m) firstprivate(j ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for private(j, k, i) firstprivate(m ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for private(j, k, m) firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(k, m) firstprivate(j ,i ) ", "context_chars": 100, "text": "mp parallel for private(j, k, m) firstprivate(i ) \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for private(j, k, i) firstprivate(m ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for private(k, m) firstprivate(j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j, k, i) firstprivate(m ) ", "context_chars": 100, "text": "a omp parallel for private(k, m) firstprivate(j ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for private(j, k, i) firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m, i, k, j)", "context_chars": 100, "text": "nces \nc-------------------------------------------------------------------*/\n for (j = 1; j < grid_points[1]-1; j++) {\n eta = (double)j * dnym1;\n \n for (k = 1; k < grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (i = 0; i < grid_points[0]; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for firstprivate(i ,k ,j ) private(m)\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\t#pragma omp parallel for firstprivate(dtpp ,i ,k ,j ) private(m)\n\tfor (m = 1; m <= 4; m++) {\n\t buf[i][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[i] = buf[i][1] * buf[i][1];\n\tbuf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + \n\t buf[i][3] * buf[i][3];\n\tq[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +\n\t\t buf[i][3]*ue[i][3]);\n }\n \n #pragma omp parallel 
for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) \n for (i = 1; i < grid_points[0]-1; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tx2*(ue[ip1][1]-ue[im1][1])+\n\t dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-\n\t\t (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+\n\t xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+\n\t dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+\n\t xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+\n\t dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);\n \n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+\n\t xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+\n\t dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-\n\t buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+\n\t dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n\n #pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) \n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);\n\ti = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n }\n\n #pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for private(i) firstprivate(dssp ,m ,k ,j ) \n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n }\n\n #pragma omp parallel for private(m, i) firstprivate(dssp ,k ,j ) \n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m]);\n\ti = grid_points[0]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);\n }\n\n }\n } #pragma omp parallel for private(m, i, k, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ) private(m)", "context_chars": 100, "text": "i = 0; i < grid_points[0]; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t} #pragma omp parallel for firstprivate(i ,k ,j ) private(m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dtpp ,i ,k ,j ) private(m)", "context_chars": 100, "text": ",k ,j ) private(m)\n\tfor (m = 0; m < 5; m++) {\n\t ue[i][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) 
{\n\t buf[i][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for firstprivate(dtpp ,i ,k ,j ) private(m)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) ", "context_chars": 100, "text": "(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +\n\t\t buf[i][3]*ue[i][3]);\n }\n \n for (i = 1; i < grid_points[0]-1; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tx2*(ue[ip1][1]-ue[im1][1])+\n\t dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-\n\t\t (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+\n\t xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+\n\t dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+\n\t xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+\n\t dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);\n \n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+\n\t xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+\n\t dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-\n\t buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+\n\t dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);\n } #pragma omp parallel for private(i) firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);\n\ti = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n } #pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) ", "context_chars": 100, "text": "- dssp *\n\t (-4.0*ue[i-1][m] + 6.0*ue[i][m] -\n\t 4.0*ue[i+1][m] + ue[i+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for private(i) firstprivate(dssp ,m ,k ,j ) \n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n } #pragma omp parallel for private(m) firstprivate(dssp ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(i) firstprivate(dssp ,m ,k ,j ) ", "context_chars": 100, "text": " #pragma omp 
parallel for private(m) firstprivate(dssp ,k ,j ) \n for (m = 0; m < 5; m++) {\n\tfor (i = 1*3; i <= grid_points[0]-3*1-1; i++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t} #pragma omp parallel for private(i) firstprivate(dssp ,m ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m, i) firstprivate(dssp ,k ,j ) ", "context_chars": 100, "text": "(ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] +\n\t 6.0*ue[i][m] - 4.0*ue[i+1][m]);\n\ti = grid_points[0]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);\n } #pragma omp parallel for private(m, i) firstprivate(dssp ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m, j, k, i) ", "context_chars": 100, "text": "x differences \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (k = 1; k < grid_points[2]-1; k++) {\n zeta = (double)k * dnzm1;\n\n for (j = 0; j < grid_points[1]; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for private(i, k, j) firstprivate(m) \n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t}\n \n\tdtpp = 1.0/dtemp[0];\n\n\t#pragma omp parallel for private(m) firstprivate(dtpp ,j ,k ,i ) \n\tfor (m = 1; m <= 4; m++) {\n\t buf[j][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[j] = buf[j][2] * buf[j][2];\n\tbuf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + \n\t buf[j][3] * buf[j][3];\n\tq[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +\n\t\t buf[j][3]*ue[j][3]);\n }\n\n #pragma omp parallel for private(j) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t ty2*( ue[jp1][2]-ue[jm1][2] )+\n\t dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+\n\t yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+\n\t dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-\n\t (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+\n\t yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+\n\t dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+\n\t yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+\n\t dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-\n\t buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+\n buf[jm1][0])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+\n\t dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);\n 
}\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n #pragma omp parallel for private(m) firstprivate(dssp ,k ,i ) \n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);\n\tj = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n }\n\n #pragma omp parallel for firstprivate(dssp ,m ,k ,i ) \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(j ,dssp ,m ,k ,i ) \n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n }\n\n #pragma omp parallel for firstprivate(dssp ,j ,m ,k ,i ) \n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m]);\n\tj = grid_points[1]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);\n }\n\n }\n } #pragma omp parallel for private(m, j, k, i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(i, k, j) firstprivate(m) ", "context_chars": 100, "text": " = 0; j < grid_points[1]; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t} #pragma omp parallel for private(i, k, j) firstprivate(m) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m) firstprivate(dtpp ,j ,k ,i ) ", "context_chars": 100, "text": ") \n\tfor (m = 0; m < 5; m++) {\n\t ue[j][m] = dtemp[m];\n\t}\n \n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[j][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for private(m) firstprivate(dtpp ,j ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(j) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) ", "context_chars": 100, "text": "];\n\tq[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +\n\t\t buf[j][3]*ue[j][3]);\n }\n\n for (j = 1; j < grid_points[1]-1; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t ty2*( ue[jp1][2]-ue[jm1][2] )+\n\t dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+\n\t yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+\n\t dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-\n\t (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+\n\t yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+\n\t dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+\n\t yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+\n\t dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ 
ue[jm1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-\n\t buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+\n buf[jm1][0])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+\n\t dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);\n } #pragma omp parallel for private(j) firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for private(m) firstprivate(dssp ,k ,i ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);\n\tj = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n } #pragma omp parallel for private(m) firstprivate(dssp ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,k ,i ) ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[j-1][m] + 6.0*ue[j][m] -\n\t 4.0*ue[j+1][m] + ue[j+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(j ,dssp ,m ,k ,i ) \n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n } #pragma omp parallel for firstprivate(dssp ,m ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,dssp ,m ,k ,i ) ", "context_chars": 100, "text": " }\n\n #pragma omp parallel for firstprivate(dssp ,m ,k ,i ) \n for (m = 0; m < 5; m++) {\n\tfor (j = 1*3; j <= grid_points[1]-3*1-1; j++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t} #pragma omp parallel for firstprivate(j ,dssp ,m ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,j ,m ,k ,i ) ", "context_chars": 100, "text": "(ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] +\n\t 6.0*ue[j][m] - 4.0*ue[j+1][m]);\n\tj = grid_points[1]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);\n } #pragma omp parallel for firstprivate(dssp ,j ,m ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "nces \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n xi = (double)i * dnxm1;\n \n for (j = 1; j < grid_points[1]-1; j++) 
{\n eta = (double)j * dnym1;\n\n for (k = 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\t#pragma omp parallel for firstprivate(m ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\t#pragma omp parallel for firstprivate(dtpp ,m ,k ,j ,i ) \n\tfor (m = 1; m <= 4; m++) {\n\t buf[k][m] = dtpp * dtemp[m];\n\t}\n\n\tcuf[k] = buf[k][3] * buf[k][3];\n\tbuf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + \n\t buf[k][2] * buf[k][2];\n\tq[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +\n\t\t buf[k][3]*ue[k][3]);\n }\n\n #pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 ,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,j ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tz2*( ue[kp1][3]-ue[km1][3] )+\n\t dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+\n\t zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+\n\t dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+\n\t zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+\n\t dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-\n\t\t (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+\n\t zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+\n\t dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-\n\t\t buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]\n +buf[km1][0])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+\n\t dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation \nc-------------------------------------------------------------------*/\n #pragma omp parallel for firstprivate(dssp ,m ,j ,i ) \n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);\n\tk = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n }\n\n #pragma omp parallel for firstprivate(dssp ,m ,j ,i ) \n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(k ,dssp ,m ,j ,i ) \n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n }\n\n #pragma omp parallel for firstprivate(dssp ,k ,m ,j ,i ) \n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m]);\n\tk = grid_points[2]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);\n }\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,j ,i ) ", "context_chars": 100, "text": "= 0; k < grid_points[2]; k++) 
{\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = dtemp[m];\n\t} #pragma omp parallel for firstprivate(m ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dtpp ,m ,k ,j ,i ) ", "context_chars": 100, "text": "rivate(m ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t ue[k][m] = dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m <= 4; m++) {\n\t buf[k][m] = dtpp * dtemp[m];\n\t} #pragma omp parallel for firstprivate(dtpp ,m ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 ,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,j ,i ) ", "context_chars": 100, "text": "];\n\tq[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +\n\t\t buf[k][3]*ue[k][3]);\n }\n\n for (k = 1; k < grid_points[2]-1; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[i][j][k][0] = forcing[i][j][k][0] -\n\t tz2*( ue[kp1][3]-ue[km1][3] )+\n\t dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);\n\n\tforcing[i][j][k][1] = forcing[i][j][k][1] -\n\t tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+\n\t zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+\n\t dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);\n\n\tforcing[i][j][k][2] = forcing[i][j][k][2] -\n\t tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+\n\t zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+\n\t dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);\n\n\tforcing[i][j][k][3] = forcing[i][j][k][3] -\n\t tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-\n\t\t (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+\n\t zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+\n\t dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);\n\n\tforcing[i][j][k][4] = forcing[i][j][k][4] -\n\t tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-\n\t\t buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]\n +buf[km1][0])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+\n\t dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);\n } #pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 ,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,j ,i ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);\n\tk = 2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n } #pragma omp parallel for firstprivate(dssp ,m ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,j ,i ) ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[k-1][m] + 6.0*ue[k][m] -\n\t 4.0*ue[k+1][m] + ue[k+2][m]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(k ,dssp ,m ,j ,i ) 
\n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n } #pragma omp parallel for firstprivate(dssp ,m ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,dssp ,m ,j ,i ) ", "context_chars": 100, "text": " }\n\n #pragma omp parallel for firstprivate(dssp ,m ,j ,i ) \n for (m = 0; m < 5; m++) {\n\tfor (k = 1*3; k <= grid_points[2]-3*1-1; k++) {\n\t forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t} #pragma omp parallel for firstprivate(k ,dssp ,m ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,k ,m ,j ,i ) ", "context_chars": 100, "text": "(ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] +\n\t 6.0*ue[k][m] - 4.0*ue[k+1][m]);\n\tk = grid_points[2]-2;\n\tforcing[i][j][k][m] = forcing[i][j][k][m] - dssp *\n\t (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);\n } #pragma omp parallel for firstprivate(dssp ,k ,m ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " of the forcing function, \nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "----------------------*/\n #pragma omp parallel for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "agma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(zeta ,eta ,xi ,dtemp ,m ) ", "context_chars": 100, "text": "xi, eta, zeta \nc-------------------------------------------------------------------*/\n\n int m;\n\n for (m = 0; m < 5; m++) {\n dtemp[m] = ce[m][0] +\n xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7]\n\t\t\t\t\t+ xi*ce[m][10]))) +\n eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8]\n\t\t\t\t\t + eta*ce[m][11])))+\n zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + \n\t\t\t\t\t zeta*ce[m][12])));\n } #pragma omp parallel for firstprivate(zeta ,eta ,xi ,dtemp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ing the whole thing here. \nc-------------------------------------------------------------------*/\n for (i = 0; i < IMAX; i++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 0; j < IMAX; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "----------------------------------*/\n #pragma omp parallel for \n for (i = 0; i < IMAX; i++) {\n for (j = 0; j < IMAX; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "+) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 0; j < IMAX; j++) {\n for (k = 0; k < IMAX; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < IMAX; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t u[i][j][k][m] = 1.0;\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n \n for (k = 0; k < grid_points[2]; k++) {\n\tzeta = (double)k * dnzm1;\n \n\tfor (ix = 0; ix < 2; ix++) {\n\t exact_solution((double)ix, eta, zeta, \n &(Pface[ix][0][0]));\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " iz++) {\n\t exact_solution(xi, eta, (double)iz, \n &Pface[iz][2][0]);\n\t}\n\n\tfor (m = 0; m < 5; m++) {\n\t Pxi = xi * Pface[1][0][m] + \n\t (1.0-xi) * Pface[0][0][m];\n\t Peta = eta * Pface[1][1][m] + \n\t (1.0-eta) * Pface[0][1][m];\n\t Pzeta = zeta * Pface[1][2][m] + \n\t (1.0-zeta) * Pface[0][2][m];\n \n\t u[i][j][k][m] = Pxi + Peta + Pzeta - \n\t Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + \n\t Pxi*Peta*Pzeta;\n\t} #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[i][j][k][m] = temp[m];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "eft hand side for starters\nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) ", "context_chars": 100, "text": "------------------------*/\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) ", "context_chars": 100, "text": "gma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t }\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) ", "context_chars": 100, "text": "ts[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t for (n = 0; n < 5; n++) {\n\t lhs[i][j][k][0][m][n] = 0.0;\n\t lhs[i][j][k][1][m][n] = 0.0;\n\t lhs[i][j][k][2][m][n] = 0.0;\n\t } #pragma omp parallel for firstprivate(j ,k ,m ,n ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s overkill, but convenient\nc-------------------------------------------------------------------*/\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": 
"#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "------------------------*/\n #pragma omp parallel for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t lhs[i][j][k][1][m][m] = 1.0;\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,i ,k ,j ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": " for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n for (i = 0; i < grid_points[0]; i++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n/*--------------------------------------------------------------------\nc \nc-------------------------------------------------------------------*/\n\tfjac[ i][ j][ k][0][0] = 0.0;\n\tfjac[ i][ j][ k][0][1] = 1.0;\n\tfjac[ i][ j][ k][0][2] = 0.0;\n\tfjac[ i][ j][ k][0][3] = 0.0;\n\tfjac[ i][ j][ k][0][4] = 0.0;\n\n\tfjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 * \n\t\t\t\t u[i][j][k][1])\n\t + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][1][1] = ( 2.0 - c2 )\n\t * ( u[i][j][k][1] / u[i][j][k][0] );\n\tfjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );\n\tfjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][1][4] = c2;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;\n\tfjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][2][3] = 0.0;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][2] = 0.0;\n\tfjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][4] = 0.0;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][1] * tmp1 );\n\tfjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 \n\t - 0.50 * c2\n\t * ( 
3.0*u[i][j][k][1]*u[i][j][k][1]\n\t + u[i][j][k][2]*u[i][j][k][2]\n\t + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )\n\t * tmp2;\n\tfjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = con43 * c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = c3c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 ) * tmp1;\n\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,i ,k ,j ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,i ,k ,j ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": "nd side in x direction\nc-------------------------------------------------------------------*/\n for (i = 1; i < grid_points[0]-1; i++) {\n\n\ttmp1 = dt * tx1;\n\ttmp2 = dt * tx2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]\n\t - tmp1 * njac[i-1][j][k][0][0]\n\t - tmp1 * dx1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]\n\t - tmp1 * njac[i-1][j][k][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]\n\t - tmp1 * njac[i-1][j][k][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]\n\t - tmp1 * njac[i-1][j][k][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]\n\t - tmp1 * njac[i-1][j][k][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]\n\t - tmp1 * njac[i-1][j][k][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]\n\t - tmp1 * njac[i-1][j][k][1][1]\n\t - tmp1 * dx2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]\n\t - tmp1 * njac[i-1][j][k][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]\n\t - tmp1 * njac[i-1][j][k][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]\n\t - tmp1 * njac[i-1][j][k][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]\n\t - tmp1 * njac[i-1][j][k][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]\n\t - tmp1 * njac[i-1][j][k][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]\n\t - tmp1 * njac[i-1][j][k][2][2]\n\t - tmp1 * dx3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]\n\t - tmp1 * 
njac[i-1][j][k][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]\n\t - tmp1 * njac[i-1][j][k][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]\n\t - tmp1 * njac[i-1][j][k][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]\n\t - tmp1 * njac[i-1][j][k][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]\n\t - tmp1 * njac[i-1][j][k][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]\n\t - tmp1 * njac[i-1][j][k][3][3]\n\t - tmp1 * dx4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]\n\t - tmp1 * njac[i-1][j][k][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]\n\t - tmp1 * njac[i-1][j][k][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]\n\t - tmp1 * njac[i-1][j][k][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]\n\t - tmp1 * njac[i-1][j][k][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]\n\t - tmp1 * njac[i-1][j][k][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]\n\t - tmp1 * njac[i-1][j][k][4][4]\n\t - tmp1 * dx5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dx1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dx2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dx3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dx4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4]\n\t + tmp1 * 2.0 * dx5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]\n\t - tmp1 * njac[i+1][j][k][0][0]\n\t - tmp1 * dx1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]\n\t - tmp1 * njac[i+1][j][k][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]\n\t - tmp1 * njac[i+1][j][k][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]\n\t - tmp1 * njac[i+1][j][k][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]\n\t - tmp1 * njac[i+1][j][k][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]\n\t - tmp1 * njac[i+1][j][k][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]\n\t - tmp1 * njac[i+1][j][k][1][1]\n\t - tmp1 * dx2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * 
fjac[i+1][j][k][1][2]\n\t - tmp1 * njac[i+1][j][k][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]\n\t - tmp1 * njac[i+1][j][k][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]\n\t - tmp1 * njac[i+1][j][k][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]\n\t - tmp1 * njac[i+1][j][k][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]\n\t - tmp1 * njac[i+1][j][k][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]\n\t - tmp1 * njac[i+1][j][k][2][2]\n\t - tmp1 * dx3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]\n\t - tmp1 * njac[i+1][j][k][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]\n\t - tmp1 * njac[i+1][j][k][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]\n\t - tmp1 * njac[i+1][j][k][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]\n\t - tmp1 * njac[i+1][j][k][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]\n\t - tmp1 * njac[i+1][j][k][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]\n\t - tmp1 * njac[i+1][j][k][3][3]\n\t - tmp1 * dx4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]\n\t - tmp1 * njac[i+1][j][k][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]\n\t - tmp1 * njac[i+1][j][k][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]\n\t - tmp1 * njac[i+1][j][k][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]\n\t - tmp1 * njac[i+1][j][k][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]\n\t - tmp1 * njac[i+1][j][k][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]\n\t - tmp1 * njac[i+1][j][k][4][4]\n\t - tmp1 * dx5;\n\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,i ,k ,j ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": "mp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[ i][ j][ k][0][0] = 0.0;\n\tfjac[ i][ j][ k][0][1] = 0.0;\n\tfjac[ i][ j][ k][0][2] = 1.0;\n\tfjac[ i][ j][ k][0][3] = 0.0;\n\tfjac[ i][ j][ k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )\n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][3] = 0.0;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)\n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t * tmp2 );\n\tfjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][2][2] = ( 2.0 - c2 )\n\t * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][4] = c2;\n\n\tfjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][3][1] = 0.0;\n\tfjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][4] = 0.0;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] 
)\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * u[i][j][k][4] * tmp1 ) \n\t * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] \n\t * tmp2;\n\tfjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 \n\t - 0.50 * c2 \n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + 3.0 * u[i][j][k][2]*u[i][j][k][2]\n\t\t + u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1; \n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = con43 * c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = c3c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 ) * tmp1;\n\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * ty1;\n\ttmp2 = dt * ty2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]\n\t - tmp1 * njac[i][j-1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]\n\t - tmp1 * njac[i][j-1][k][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]\n\t - tmp1 * njac[i][j-1][k][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]\n\t - tmp1 * njac[i][j-1][k][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]\n\t - tmp1 * njac[i][j-1][k][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]\n\t - tmp1 * njac[i][j-1][k][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]\n\t - tmp1 * njac[i][j-1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]\n\t - tmp1 * njac[i][j-1][k][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]\n\t - tmp1 * njac[i][j-1][k][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]\n\t - tmp1 * njac[i][j-1][k][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]\n\t - tmp1 * njac[i][j-1][k][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]\n\t - tmp1 * 
njac[i][j-1][k][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]\n\t - tmp1 * njac[i][j-1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]\n\t - tmp1 * njac[i][j-1][k][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]\n\t - tmp1 * njac[i][j-1][k][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]\n\t - tmp1 * njac[i][j-1][k][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]\n\t - tmp1 * njac[i][j-1][k][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]\n\t - tmp1 * njac[i][j-1][k][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]\n\t - tmp1 * njac[i][j-1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]\n\t - tmp1 * njac[i][j-1][k][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]\n\t - tmp1 * njac[i][j-1][k][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]\n\t - tmp1 * njac[i][j-1][k][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]\n\t - tmp1 * njac[i][j-1][k][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]\n\t - tmp1 * njac[i][j-1][k][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]\n\t - tmp1 * njac[i][j-1][k][4][4]\n\t - tmp1 * dy5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dy1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dy2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dy3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dy4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4] \n\t + tmp1 * 2.0 * dy5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]\n\t - tmp1 * njac[i][j+1][k][0][0]\n\t - tmp1 * dy1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]\n\t - tmp1 * njac[i][j+1][k][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]\n\t - tmp1 * njac[i][j+1][k][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]\n\t - tmp1 * njac[i][j+1][k][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]\n\t - tmp1 * njac[i][j+1][k][0][4];\n\n\tlhs[i][j][k][CC][1][0] = 
tmp2 * fjac[i][j+1][k][1][0]\n\t - tmp1 * njac[i][j+1][k][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]\n\t - tmp1 * njac[i][j+1][k][1][1]\n\t - tmp1 * dy2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]\n\t - tmp1 * njac[i][j+1][k][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]\n\t - tmp1 * njac[i][j+1][k][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]\n\t - tmp1 * njac[i][j+1][k][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]\n\t - tmp1 * njac[i][j+1][k][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]\n\t - tmp1 * njac[i][j+1][k][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]\n\t - tmp1 * njac[i][j+1][k][2][2]\n\t - tmp1 * dy3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]\n\t - tmp1 * njac[i][j+1][k][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]\n\t - tmp1 * njac[i][j+1][k][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]\n\t - tmp1 * njac[i][j+1][k][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]\n\t - tmp1 * njac[i][j+1][k][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]\n\t - tmp1 * njac[i][j+1][k][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]\n\t - tmp1 * njac[i][j+1][k][3][3]\n\t - tmp1 * dy4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]\n\t - tmp1 * njac[i][j+1][k][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]\n\t - tmp1 * njac[i][j+1][k][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]\n\t - tmp1 * njac[i][j+1][k][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]\n\t - tmp1 * njac[i][j+1][k][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]\n\t - tmp1 * njac[i][j+1][k][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]\n\t - tmp1 * njac[i][j+1][k][4][4]\n\t - tmp1 * dy5;\n\n }\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c4 ,c3 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": " for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\n\ttmp1 = 1.0 / u[i][j][k][0];\n\ttmp2 = tmp1 * tmp1;\n\ttmp3 = tmp1 * tmp2;\n\n\tfjac[i][j][k][0][0] = 0.0;\n\tfjac[i][j][k][0][1] = 0.0;\n\tfjac[i][j][k][0][2] = 0.0;\n\tfjac[i][j][k][0][3] = 1.0;\n\tfjac[i][j][k][0][4] = 0.0;\n\n\tfjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) \n\t * tmp2;\n\tfjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][1][2] = 0.0;\n\tfjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][1][4] = 0.0;\n\n\tfjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][2][1] = 0.0;\n\tfjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][2][4] = 0.0;\n\n\tfjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) \n\t + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t\t + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );\n\tfjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;\n\tfjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;\n\tfjac[i][j][k][3][3] = ( 2.0 - c2 )\n\t * 
u[i][j][k][3] * tmp1;\n\tfjac[i][j][k][3][4] = c2;\n\n\tfjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t\t\t\t+ u[i][j][k][2] * u[i][j][k][2]\n\t\t\t\t\t+ u[i][j][k][3] * u[i][j][k][3] )\n\t\t\t\t* tmp2\n\t\t\t\t- c1 * ( u[i][j][k][4] * tmp1 ) )\n\t * ( u[i][j][k][3] * tmp1 );\n\tfjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )\n\t * tmp2;\n\tfjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )\n\t - 0.50 * c2\n\t * ( ( u[i][j][k][1]*u[i][j][k][1]\n\t\t + u[i][j][k][2]*u[i][j][k][2]\n\t\t + 3.0*u[i][j][k][3]*u[i][j][k][3] )\n\t * tmp2 );\n\tfjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;\n\n\tnjac[i][j][k][0][0] = 0.0;\n\tnjac[i][j][k][0][1] = 0.0;\n\tnjac[i][j][k][0][2] = 0.0;\n\tnjac[i][j][k][0][3] = 0.0;\n\tnjac[i][j][k][0][4] = 0.0;\n\n\tnjac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][1][1] = c3c4 * tmp1;\n\tnjac[i][j][k][1][2] = 0.0;\n\tnjac[i][j][k][1][3] = 0.0;\n\tnjac[i][j][k][1][4] = 0.0;\n\n\tnjac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][2][1] = 0.0;\n\tnjac[i][j][k][2][2] = c3c4 * tmp1;\n\tnjac[i][j][k][2][3] = 0.0;\n\tnjac[i][j][k][2][4] = 0.0;\n\n\tnjac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][3][1] = 0.0;\n\tnjac[i][j][k][3][2] = 0.0;\n\tnjac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;\n\tnjac[i][j][k][3][4] = 0.0;\n\n\tnjac[i][j][k][4][0] = - ( c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))\n\t - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))\n\t - ( con43 * c3c4\n\t - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))\n\t - c1345 * tmp2 * u[i][j][k][4];\n\n\tnjac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];\n\tnjac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];\n\tnjac[i][j][k][4][3] = ( con43 * c3c4\n\t\t\t\t- c1345 ) * tmp2 * u[i][j][k][3];\n\tnjac[i][j][k][4][4] = ( c1345 )* tmp1;\n\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c4 ,c3 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c4 ,c3 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\n\ttmp1 = dt * tz1;\n\ttmp2 = dt * tz2;\n\n\tlhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]\n\t - tmp1 * njac[i][j][k-1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]\n\t - tmp1 * njac[i][j][k-1][0][1];\n\tlhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]\n\t - tmp1 * njac[i][j][k-1][0][2];\n\tlhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]\n\t - tmp1 * njac[i][j][k-1][0][3];\n\tlhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]\n\t - tmp1 * njac[i][j][k-1][0][4];\n\n\tlhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]\n\t - tmp1 * njac[i][j][k-1][1][0];\n\tlhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]\n\t - tmp1 * njac[i][j][k-1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]\n\t - tmp1 * njac[i][j][k-1][1][2];\n\tlhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]\n\t - tmp1 * njac[i][j][k-1][1][3];\n\tlhs[i][j][k][AA][1][4] = - tmp2 * 
fjac[i][j][k-1][1][4]\n\t - tmp1 * njac[i][j][k-1][1][4];\n\n\tlhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]\n\t - tmp1 * njac[i][j][k-1][2][0];\n\tlhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]\n\t - tmp1 * njac[i][j][k-1][2][1];\n\tlhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]\n\t - tmp1 * njac[i][j][k-1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]\n\t - tmp1 * njac[i][j][k-1][2][3];\n\tlhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]\n\t - tmp1 * njac[i][j][k-1][2][4];\n\n\tlhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]\n\t - tmp1 * njac[i][j][k-1][3][0];\n\tlhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]\n\t - tmp1 * njac[i][j][k-1][3][1];\n\tlhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]\n\t - tmp1 * njac[i][j][k-1][3][2];\n\tlhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]\n\t - tmp1 * njac[i][j][k-1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]\n\t - tmp1 * njac[i][j][k-1][3][4];\n\n\tlhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]\n\t - tmp1 * njac[i][j][k-1][4][0];\n\tlhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]\n\t - tmp1 * njac[i][j][k-1][4][1];\n\tlhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]\n\t - tmp1 * njac[i][j][k-1][4][2];\n\tlhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]\n\t - tmp1 * njac[i][j][k-1][4][3];\n\tlhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]\n\t - tmp1 * njac[i][j][k-1][4][4]\n\t - tmp1 * dz5;\n\n\tlhs[i][j][k][BB][0][0] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][0][0]\n\t + tmp1 * 2.0 * dz1;\n\tlhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];\n\tlhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];\n\tlhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];\n\tlhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];\n\n\tlhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];\n\tlhs[i][j][k][BB][1][1] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][1][1]\n\t + tmp1 * 2.0 * dz2;\n\tlhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];\n\tlhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];\n\tlhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];\n\n\tlhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];\n\tlhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];\n\tlhs[i][j][k][BB][2][2] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][2][2]\n\t + tmp1 * 2.0 * dz3;\n\tlhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];\n\tlhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];\n\n\tlhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];\n\tlhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];\n\tlhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];\n\tlhs[i][j][k][BB][3][3] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][3][3]\n\t + tmp1 * 2.0 * dz4;\n\tlhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];\n\n\tlhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];\n\tlhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];\n\tlhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];\n\tlhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];\n\tlhs[i][j][k][BB][4][4] = 1.0\n\t + tmp1 * 2.0 * njac[i][j][k][4][4]\n\t + tmp1 * 2.0 * dz5;\n\n\tlhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]\n\t - tmp1 * njac[i][j][k+1][0][0]\n\t - tmp1 * dz1;\n\tlhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]\n\t - tmp1 * njac[i][j][k+1][0][1];\n\tlhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]\n\t - tmp1 * 
njac[i][j][k+1][0][2];\n\tlhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]\n\t - tmp1 * njac[i][j][k+1][0][3];\n\tlhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]\n\t - tmp1 * njac[i][j][k+1][0][4];\n\n\tlhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]\n\t - tmp1 * njac[i][j][k+1][1][0];\n\tlhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]\n\t - tmp1 * njac[i][j][k+1][1][1]\n\t - tmp1 * dz2;\n\tlhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]\n\t - tmp1 * njac[i][j][k+1][1][2];\n\tlhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]\n\t - tmp1 * njac[i][j][k+1][1][3];\n\tlhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]\n\t - tmp1 * njac[i][j][k+1][1][4];\n\n\tlhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]\n\t - tmp1 * njac[i][j][k+1][2][0];\n\tlhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]\n\t - tmp1 * njac[i][j][k+1][2][1];\n\tlhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]\n\t - tmp1 * njac[i][j][k+1][2][2]\n\t - tmp1 * dz3;\n\tlhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]\n\t - tmp1 * njac[i][j][k+1][2][3];\n\tlhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]\n\t - tmp1 * njac[i][j][k+1][2][4];\n\n\tlhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]\n\t - tmp1 * njac[i][j][k+1][3][0];\n\tlhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]\n\t - tmp1 * njac[i][j][k+1][3][1];\n\tlhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]\n\t - tmp1 * njac[i][j][k+1][3][2];\n\tlhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]\n\t - tmp1 * njac[i][j][k+1][3][3]\n\t - tmp1 * dz4;\n\tlhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]\n\t - tmp1 * njac[i][j][k+1][3][4];\n\n\tlhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]\n\t - tmp1 * njac[i][j][k+1][4][0];\n\tlhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]\n\t - tmp1 * njac[i][j][k+1][4][1];\n\tlhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]\n\t - tmp1 * njac[i][j][k+1][4][2];\n\tlhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]\n\t - tmp1 * njac[i][j][k+1][4][3];\n\tlhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]\n\t - tmp1 * njac[i][j][k+1][4][4]\n\t - tmp1 * dz5;\n\n }\n } #pragma omp parallel for firstprivate(c2 ,c1 ,c3c4 ,con43 ,c4 ,c3 ,c1345 ,k ,j ,i ,tmp1 ,tmp2 ,tmp3 ) lastprivate(tmp1 ,tmp2 ,tmp3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,rho_inv ,i ) ", "context_chars": 100, "text": "---------------------------------*/\n #pragma omp for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,rho_inv ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\trho_inv = 1.0/u[i][j][k][0];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[i][j][k][1] * rho_inv;\n\tvs[i][j][k] = u[i][j][k][2] * rho_inv;\n\tws[i][j][k] = u[i][j][k][3] * rho_inv;\n\tsquare[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + \n\t\t\t\t u[i][j][k][2]*u[i][j][k][2] +\n\t\t\t\t u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n }\n } #pragma omp parallel for firstprivate(j ,k ,rho_inv ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,rho_inv ,i ) ", "context_chars": 100, "text": " omp parallel for firstprivate(j ,k ,rho_inv ,i ) \n for (j = 0; j < grid_points[1]; 
j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\trho_inv = 1.0/u[i][j][k][0];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[i][j][k][1] * rho_inv;\n\tvs[i][j][k] = u[i][j][k][2] * rho_inv;\n\tws[i][j][k] = u[i][j][k][3] * rho_inv;\n\tsquare[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + \n\t\t\t\t u[i][j][k][2]*u[i][j][k][2] +\n\t\t\t\t u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n } #pragma omp parallel for firstprivate(j ,k ,rho_inv ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "--------------------------------*/\n\n #pragma omp for \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 0; j < grid_points[1]; j++) {\n for (k = 0; k < grid_points[2]; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < grid_points[2]; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = forcing[i][j][k][m];\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * \n\t (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + \n\t u[i-1][j][k][0]) -\n\t tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);\n\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * \n\t (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + \n\t u[i-1][j][k][1]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[i+1][j][k][1]*up1 - \n\t\t u[i-1][j][k][1]*um1 +\n\t\t (u[i+1][j][k][4]- square[i+1][j][k]-\n\t\t u[i-1][j][k][4]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * \n\t (u[i+1][j][k][2] - 2.0*u[i][j][k][2] +\n\t u[i-1][j][k][2]) +\n\t xxcon2 
* (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][2]*up1 - \n\t\t u[i-1][j][k][2]*um1);\n\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * \n\t (u[i+1][j][k][3] - 2.0*u[i][j][k][3] +\n\t u[i-1][j][k][3]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][3]*up1 - \n\t\t u[i-1][j][k][3]*um1);\n\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * \n\t (u[i+1][j][k][4] - 2.0*u[i][j][k][4] +\n\t u[i-1][j][k][4]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i-1][j][k][4]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[i+1][j][k][4] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[i-1][j][k][4] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n } #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) ", "context_chars": 100, "text": "dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * \n\t (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + \n\t u[i-1][j][k][0]) -\n\t tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);\n\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * \n\t (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + \n\t u[i-1][j][k][1]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[i+1][j][k][1]*up1 - \n\t\t u[i-1][j][k][1]*um1 +\n\t\t (u[i+1][j][k][4]- square[i+1][j][k]-\n\t\t u[i-1][j][k][4]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * \n\t (u[i+1][j][k][2] - 2.0*u[i][j][k][2] +\n\t u[i-1][j][k][2]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][2]*up1 - \n\t\t u[i-1][j][k][2]*um1);\n\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * \n\t (u[i+1][j][k][3] - 2.0*u[i][j][k][3] +\n\t u[i-1][j][k][3]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[i+1][j][k][3]*up1 - \n\t\t u[i-1][j][k][3]*um1);\n\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * \n\t (u[i+1][j][k][4] - 2.0*u[i][j][k][4] +\n\t u[i-1][j][k][4]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i-1][j][k][4]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[i+1][j][k][4] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[i-1][j][k][4] - \n\t\t c2*square[i-1][j][k])*um1 );\n } #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,j ) ", "context_chars": 100, "text": "----------------------*/\n i = 1;\n #pragma omp 
for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n }\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,j ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(k ,m ,dssp ,j ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +\n\t u[i+2][j][k][m]);\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,j ) ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n i = 2;\n #pragma omp for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n }\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,j ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(k ,m ,dssp ,j ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "+2][j][k][m]);\n }\n }\n }\n\n #pragma omp for \n for (i = 3; i < grid_points[0]-3; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t 
u[i+2][j][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + \n\t u[i+2][j][k][m] );\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) ", "context_chars": 100, "text": " \n i = grid_points[0]-3;\n #pragma omp for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) ", "context_chars": 100, "text": "mp parallel for firstprivate(k ,m ,i ,dssp ,j ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );\n } #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) ", "context_chars": 100, "text": " }\n }\n\n i = grid_points[0]-2;\n #pragma omp for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +\n\t 5.0*u[i][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) ", "context_chars": 100, "text": "mp parallel for firstprivate(k ,m ,i ,dssp ,j ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +\n\t 5.0*u[i][j][k][m] );\n } #pragma omp parallel for firstprivate(k ,m ,i ,dssp ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp for \n for (i = 1; i < 
grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * \n\t (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + \n\t u[i][j-1][k][0]) -\n\t ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * \n\t (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + \n\t u[i][j-1][k][1]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][1]*vp1 - \n\t\t u[i][j-1][k][1]*vm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * \n\t (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + \n\t u[i][j-1][k][2]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[i][j+1][k][2]*vp1 - \n\t\t u[i][j-1][k][2]*vm1 +\n\t\t (u[i][j+1][k][4] - square[i][j+1][k] - \n\t\t u[i][j-1][k][4] + square[i][j-1][k])\n\t\t *c2);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * \n\t (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + \n\t u[i][j-1][k][3]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][3]*vp1 - \n\t\t u[i][j-1][k][3]*vm1);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * \n\t (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + \n\t u[i][j-1][k][4]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j-1][k][4]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[i][j+1][k][4] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[i][j-1][k][4] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n } #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) ", "context_chars": 100, "text": "dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * \n\t (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + \n\t u[i][j-1][k][0]) -\n\t ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * \n\t (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + \n\t u[i][j-1][k][1]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][1]*vp1 - \n\t\t u[i][j-1][k][1]*vm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * \n\t (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + \n\t u[i][j-1][k][2]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[i][j+1][k][2]*vp1 - \n\t\t u[i][j-1][k][2]*vm1 +\n\t\t (u[i][j+1][k][4] - square[i][j+1][k] - \n\t\t u[i][j-1][k][4] + square[i][j-1][k])\n\t\t *c2);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * \n\t (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + \n\t u[i][j-1][k][3]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[i][j+1][k][3]*vp1 - \n\t\t 
u[i][j-1][k][3]*vm1);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * \n\t (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + \n\t u[i][j-1][k][4]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j-1][k][4]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[i][j+1][k][4] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[i][j-1][k][4] - \n\t\t c2*square[i][j-1][k]) * vm1);\n } #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,i ) ", "context_chars": 100, "text": "----------------------*/\n j = 1;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n }\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(k ,m ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +\n\t u[i][j+2][k][m]);\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,i ) ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n j = 2;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n }\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(k ,m ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);\n } #pragma omp parallel for firstprivate(k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "][j+2][k][m]);\n }\n }\n }\n\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 3; j < grid_points[1]-3; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n 
for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (j = 3; j < grid_points[1]-3; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + \n\t u[i][j+2][k][m] );\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) ", "context_chars": 100, "text": " \n j = grid_points[1]-3;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n }\n } #pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(k ,m ,j ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );\n } #pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) ", "context_chars": 100, "text": " }\n }\n\n j = grid_points[1]-2;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n #pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +\n\t 5.*u[i][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(k ,m ,j 
,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(k ,m ,j ,dssp ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +\n\t 5.*u[i][j][k][m] );\n } #pragma omp parallel for firstprivate(k ,m ,j ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) ", "context_chars": 100, "text": "-------------------------------*/\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * \n\t (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + \n\t u[i][j][k-1][0]) -\n\t tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * \n\t (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + \n\t u[i][j][k-1][1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][1]*wp1 - \n\t\t u[i][j][k-1][1]*wm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * \n\t (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + \n\t u[i][j][k-1][2]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][2]*wp1 - \n\t\t u[i][j][k-1][2]*wm1);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * \n\t (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + \n\t u[i][j][k-1][3]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[i][j][k+1][3]*wp1 - \n\t\t u[i][j][k-1][3]*wm1 +\n\t\t (u[i][j][k+1][4] - square[i][j][k+1] - \n\t\t u[i][j][k-1][4] + square[i][j][k-1])\n\t\t *c2);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * \n\t (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + \n\t u[i][j][k-1][4]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j][k-1][4]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[i][j][k+1][4] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[i][j][k-1][4] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n } #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) ", "context_chars": 100, "text": ",con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[i][j][k][0] = 
rhs[i][j][k][0] + dz1tz1 * \n\t (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + \n\t u[i][j][k-1][0]) -\n\t tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);\n\trhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * \n\t (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + \n\t u[i][j][k-1][1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][1]*wp1 - \n\t\t u[i][j][k-1][1]*wm1);\n\trhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * \n\t (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + \n\t u[i][j][k-1][2]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[i][j][k+1][2]*wp1 - \n\t\t u[i][j][k-1][2]*wm1);\n\trhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * \n\t (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + \n\t u[i][j][k-1][3]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[i][j][k+1][3]*wp1 - \n\t\t u[i][j][k-1][3]*wm1 +\n\t\t (u[i][j][k+1][4] - square[i][j][k+1] - \n\t\t u[i][j][k-1][4] + square[i][j][k-1])\n\t\t *c2);\n\trhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * \n\t (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + \n\t u[i][j][k-1][4]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - \n\t\t 2.0*u[i][j][k][4]*rho_i[i][j][k] +\n\t\t u[i][j][k-1][4]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[i][j][k+1][4] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[i][j][k-1][4] - \n\t\t c2*square[i][j][k-1])*wm1);\n } #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,dssp ,i ) ", "context_chars": 100, "text": "----------------------*/\n k = 1;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n }\n } #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(j ,m ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m]- dssp * \n\t ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +\n\t u[i][j][k+2][m]);\n } #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,dssp ,i ) ", "context_chars": 100, "text": "[m]);\n }\n }\n }\n\n k = 2;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n }\n } #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(j ,m ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -\n\t 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);\n } #pragma omp parallel for firstprivate(j ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "][j][k+2][m]);\n }\n }\n }\n\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (k = 3; k < grid_points[2]-3; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 3; k < grid_points[2]-3; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) ", "context_chars": 100, "text": "a omp parallel for firstprivate(j ,k ,m ,dssp ,i ) \n for (k = 3; k < grid_points[2]-3; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * \n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + \n\t u[i][j][k+2][m] );\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) ", "context_chars": 100, "text": " \n k = grid_points[2]-3;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n }\n } #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) ", 
"context_chars": 100, "text": "mp parallel for firstprivate(j ,m ,k ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + \n\t 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );\n } #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) ", "context_chars": 100, "text": " }\n }\n\n k = grid_points[2]-2;\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) \n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) ", "context_chars": 100, "text": "mp parallel for firstprivate(j ,m ,k ,dssp ,i ) \n for (j = 1; j < grid_points[1]-1; j++) {\n for (m = 0; m < 5; m++) {\n\trhs[i][j][k][m] = rhs[i][j][k][m] - dssp *\n\t ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +\n\t 5.0*u[i][j][k][m] );\n } #pragma omp parallel for firstprivate(j ,m ,k ,dssp ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dt ,m ) ", "context_chars": 100, "text": "----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n\n rhs_norm(xcr);\n\n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for firstprivate(dt ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": " \n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n }\n\n *class = 'U';\n *verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "he known reference values.\nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,n ,j ,i ) ", "context_chars": 100, "text": "points[0]-2; i >= 0; i--) {\n #pragma omp for \n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(m ,k ,n ,j ,i ) \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m]\n\t - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];\n\t 
}\n\t}\n } #pragma omp parallel for firstprivate(m ,k ,n ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,n ,j ,i ) ", "context_chars": 100, "text": "agma omp parallel for firstprivate(m ,k ,n ,j ,i ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m]\n\t - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];\n\t }\n\t} #pragma omp parallel for firstprivate(m ,k ,n ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,n ,i ,j ) ", "context_chars": 100, "text": "points[1]-2; j >= 0; j--) {\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (k = 1; k < grid_points[2]-1; k++) {\n\t#pragma omp parallel for firstprivate(m ,k ,n ,i ,j ) \n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];\n\t }\n\t}\n } #pragma omp parallel for firstprivate(m ,k ,n ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,n ,i ,j ) ", "context_chars": 100, "text": "agma omp parallel for firstprivate(m ,k ,n ,i ,j ) \n for (k = 1; k < grid_points[2]-1; k++) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];\n\t }\n\t} #pragma omp parallel for firstprivate(m ,k ,n ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/BT/bt.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,k ,n ,i ) ", "context_chars": 100, "text": "--------*/\n\n int i, j, k, m, n;\n\n #pragma omp for \n for (i = 1; i < grid_points[0]-1; i++) {\n for (j = 1; j < grid_points[1]-1; j++) {\n for (k = grid_points[2]-2; k >= 0; k--) {\n\tfor (m = 0; m < BLOCK_SIZE; m++) {\n\t for (n = 0; n < BLOCK_SIZE; n++) {\n\t rhs[i][j][k][m] = rhs[i][j][k][m] \n\t - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];\n\t }\n\t}\n }\n } #pragma omp parallel for firstprivate(m ,j ,k ,n ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": "lastcol-firstcol+1)\nc---------------------------------------------------------------------*/\n{\t\n for (j = 1; j <= lastrow - firstrow + 1; j++) {\n\t#pragma omp parallel for private(i ,j ,k ) \n\tfor (k = rowstr[j]; k < rowstr[j+1]; k++) {\n colidx[k] = colidx[k] - firstcol + 1;\n\t}\n } #pragma omp parallel for private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": " #pragma omp parallel for private(i ,j ,k ) \n for (j = 1; j <= lastrow - firstrow + 1; j++) {\n\tfor (k = rowstr[j]; k < rowstr[j+1]; k++) {\n colidx[k] = colidx[k] - firstcol + 1;\n\t} #pragma omp parallel for 
private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ) ", "context_chars": 100, "text": "vector to (1, 1, .... 1)\nc-------------------------------------------------------------------*/\n for (i = 1; i <= NA+1; i++) {\n\tx[i] = 1.0;\n } #pragma omp parallel for firstprivate(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ) reduction(+:norm_temp11) reduction(+:norm_temp12) ", "context_chars": 100, "text": "ragma omp parallel for firstprivate(j ) \n for (i = 1; i <= NA+1; i++) {\n\tx[i] = 1.0;\n }\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n q[j] = 0.0;\n z[j] = 0.0;\n r[j] = 0.0;\n p[j] = 0.0;\n } #pragma omp parallel for private(j ) reduction(+:norm_temp11) reduction(+:norm_temp12) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "t timing, to niter its)\nc-------------------------------------------------------------------*/\n\n for (it = 1; it <= 1; it++) {\n\n/*--------------------------------------------------------------------\nc The call to the conjugate gradient routine:\nc-------------------------------------------------------------------*/\n\tconj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm);\n\n/*--------------------------------------------------------------------\nc zeta = shift + 1/(x.z)\nc So, first: (x.z)\nc Also, find norm of z\nc So, first: (z.z)\nc-------------------------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t}\n\tnorm_temp12 = 1.0 / sqrt( norm_temp12 );\n\n/*--------------------------------------------------------------------\nc Normalize z to obtain x\nc-------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n x[j] = norm_temp12*z[j];\n\t}\n\t\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,sum ) ", "context_chars": 100, "text": "lize the CG algorithm:\nc-------------------------------------------------------------------*/\n{\n for (j = 1; j <= naa+1; j++) {\n\tq[j] = 0.0;\n\tz[j] = 0.0;\n\tr[j] = x[j];\n\tp[j] = r[j];\n\t//w[j] = 0.0;\n } #pragma omp parallel for private(j ,sum ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,sum ) reduction(+:rho) ", "context_chars": 100, "text": "of r elements locally...\nc-------------------------------------------------------------------*/\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n\trho = rho + r[j]*r[j];\n } #pragma omp parallel for private(j ,sum ) reduction(+:rho) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,sum ,alpha 
,beta ) ", "context_chars": 100, "text": "C on the Cray t3d - overall speed of code is 1.5 times faster.\n*/\n\n/* rolled version */ \n\tfor (j = 1; j <= lastrow-firstrow+1; j++) {\n sum = 0.0;\n\t #pragma omp parallel for firstprivate(sum ,k ,rowstr ,colidx ,p ,a ,q ,j ,cgit ) \n\t for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n\t\tsum = sum + a[k]*p[colidx[k]];\n\t }\n //w[j] = sum;\n q[j] = sum;\n\t} #pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(sum ,k ,rowstr ,colidx ,p ,a ,q ,j ,cgit ) ", "context_chars": 100, "text": "(j ,k ,sum ,alpha ,beta ) \n\tfor (j = 1; j <= lastrow-firstrow+1; j++) {\n sum = 0.0;\n\t for (k = rowstr[j]; k < rowstr[j+1]; k++) {\n\t\tsum = sum + a[k]*p[colidx[k]];\n\t } #pragma omp parallel for firstprivate(sum ,k ,rowstr ,colidx ,p ,a ,q ,j ,cgit ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:d) ", "context_chars": 100, "text": "-------------\nc Obtain p.q\nc-------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n d = d + p[j]*q[j];\n\t} #pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:d) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:rho) ", "context_chars": 100, "text": "c and r = r - alpha*q\nc---------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n z[j] = z[j] + alpha*p[j];\n r[j] = r[j] - alpha*q[j];\n//\t} #pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:rho) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:rho) ", "context_chars": 100, "text": "---------\nc p = r + beta*p\nc-------------------------------------------------------------------*/\n\tfor (j = 1; j <= lastcol-firstcol+1; j++) {\n p[j] = r[j] + beta*p[j];\n\t} #pragma omp parallel for private(j ,k ,sum ,alpha ,beta ) reduction(+:rho) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,d ) ", "context_chars": 100, "text": "\nc---------------------------------------------------------------------*/\n sum = 0.0;\n \n{\n for (j = 1; j <= lastrow-firstrow+1; j++) {\n\td = 0.0;\n\t#pragma omp parallel for firstprivate(d ,k ,rowstr ,colidx ,z ,a ,r ,j ) \n\tfor (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {\n d = d + a[k]*z[colidx[k]];\n\t}\n\tr[j] = d;\n } #pragma omp parallel for private(j ,d ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,k ,rowstr ,colidx ,z ,a ,r ,j ) ", "context_chars": 100, "text": "#pragma omp parallel for private(j ,d ) \n for (j = 1; j <= lastrow-firstrow+1; j++) {\n\td = 0.0;\n\tfor (k = 
rowstr[j]; k <= rowstr[j+1]-1; k++) {\n d = d + a[k]*z[colidx[k]];\n\t} #pragma omp parallel for firstprivate(d ,k ,rowstr ,colidx ,z ,a ,r ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ,d ) reduction(+:sum) ", "context_chars": 100, "text": "is point, r contains A.z\nc-------------------------------------------------------------------*/\n for (j = 1; j <= lastcol-firstcol+1; j++) {\n\td = x[j] - r[j];\n\tsum = sum + d*d;\n } #pragma omp parallel for private(j ,d ) reduction(+:sum) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "mark nonzero positions\nc---------------------------------------------------------------------*/\n for (i = 1; i <= n; i++) {\n\tcolidx[n+i] = 0;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(j ) ", "context_chars": 100, "text": "r of triples in each row\nc-------------------------------------------------------------------*/\n for (j = 1; j <= n; j++) {\n\trowstr[j] = 0;\n\tmark[j] = FALSE;\n } #pragma omp parallel for private(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(k ,j ) ", "context_chars": 100, "text": ". preload data pages\nc---------------------------------------------------------------------*/\n for(j = 0;j <= nrows-1;j++) {\n #pragma omp parallel for firstprivate(a ,k ,j ) \n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n } #pragma omp parallel for private(k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a ,k ,j ) ", "context_chars": 100, "text": "-----*/\n #pragma omp parallel for private(k ,j ) \n for(j = 0;j <= nrows-1;j++) {\n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n }\n/*--------------------------------------------------------------------\nc ... 
do a bucket sort of the triples on the row index\nc-------------------------------------------------------------------*/\n for (nza = 1; nza <= nnza; nza++) {\n\tj = arow[nza] - firstrow + 1;\n\tk = rowstr[j];\n\ta[k] = aelt[nza];\n\tcolidx[k] = acol[nza];\n\trowstr[j] = rowstr[j] + 1;\n } #pragma omp parallel for firstprivate(a ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "ng elements\nc-------------------------------------------------------------------*/\n nza = 0;\n for (i = 1; i <= n; i++) {\n\tx[i] = 0.0;\n\tmark[i] = FALSE;\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) ", "context_chars": 100, "text": "\n double tmp, tmp1;\n double tmat[5][5];\n\n #pragma omp for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) \n for (m = 0; m < 5; m++) {\n\tv[i][j][k][m] = v[i][j][k][m]\n\t - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]\n\t\t + ldz[i][j][m][1] * v[i][j][k-1][1]\n\t\t + ldz[i][j][m][2] * v[i][j][k-1][2]\n\t\t + ldz[i][j][m][3] * v[i][j][k-1][3]\n\t\t + ldz[i][j][m][4] * v[i][j][k-1][4] );\n }\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) ", "context_chars": 100, "text": "rivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) \n for (j = jst; j <= jend; j++) {\n for (m = 0; m < 5; m++) {\n\tv[i][j][k][m] = v[i][j][k][m]\n\t - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]\n\t\t + ldz[i][j][m][1] * v[i][j][k-1][1]\n\t\t + ldz[i][j][m][2] * v[i][j][k-1][2]\n\t\t + ldz[i][j][m][3] * v[i][j][k-1][3]\n\t\t + ldz[i][j][m][4] * v[i][j][k-1][4] );\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) ", "context_chars": 100, "text": "\n double tmp, tmp1;\n double tmat[5][5];\n\n #pragma omp for \n for (i = iend; i >= ist; i--) {\n for (j = jend; j >= jst; j--) {\n #pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) \n for (m = 0; m < 5; m++) {\n\ttv[i][j][m] = \n\t omega * ( udz[i][j][m][0] * v[i][j][k+1][0]\n\t\t + udz[i][j][m][1] * v[i][j][k+1][1]\n\t\t + udz[i][j][m][2] * v[i][j][k+1][2]\n\t\t + udz[i][j][m][3] * v[i][j][k+1][3]\n\t\t + udz[i][j][m][4] * v[i][j][k+1][4] );\n }\n } #pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) ", "context_chars": 100, "text": "te(ist 
,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) \n for (j = jend; j >= jst; j--) {\n for (m = 0; m < 5; m++) {\n\ttv[i][j][m] = \n\t omega * ( udz[i][j][m][0] * v[i][j][k+1][0]\n\t\t + udz[i][j][m][1] * v[i][j][k+1][1]\n\t\t + udz[i][j][m][2] * v[i][j][k+1][2]\n\t\t + udz[i][j][m][3] * v[i][j][k+1][3]\n\t\t + udz[i][j][m][4] * v[i][j][k+1][4] );\n } #pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "uble u21jm1, u31jm1, u41jm1, u51jm1;\n double u21km1, u31km1, u41km1, u51km1;\n\n dsspm = dssp;\n\n for (i = 0; i < nx; i++) {\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n for (j = 0; j < ny; j++) {\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) ", "context_chars": 100, "text": "km1, u41km1, u51km1;\n\n dsspm = dssp;\n\n #pragma omp parallel for \n for (i = 0; i < nx; i++) {\n for (j = 0; j < ny; j++) {\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n for (j = 0; j < ny; j++) {\n for (k = 0; k < nz; k++) {\n\t#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) \n for (k = 0; k < nz; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m ,k ,j ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = 0.0;\n\t}\n }\n }\n }\n\n for (i = 0; i < nx; i++) {\n iglob = i;\n xi = ( (double)(iglob) ) / ( nx0 - 1 );\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) \n for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) \n for (k = 0; k < nz; 
k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) ", "context_chars": 100, "text": "el for \n for (i = 0; i < nx; i++) {\n iglob = i;\n xi = ( (double)(iglob) ) / ( nx0 - 1 );\n for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) \n for (k = 0; k < nz; k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n }\n } #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) ", "context_chars": 100, "text": " for (j = 0; j < ny; j++) {\n jglob = j;\n eta = ( (double)(jglob) ) / ( ny0 - 1 );\n for (k = 0; k < nz; k++) {\n\tzeta = ( (double)(k) ) / ( nz - 1 );\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = ce[m][0]\n\t + ce[m][1] * xi\n\t + ce[m][2] * eta\n\t + ce[m][3] * zeta\n\t + ce[m][4] * xi * xi\n\t + ce[m][5] * eta * eta\n\t + ce[m][6] * zeta * zeta\n\t + ce[m][7] * xi * xi * xi\n\t + ce[m][8] * eta * eta * eta\n\t + ce[m][9] * zeta * zeta * zeta\n\t + ce[m][10] * xi * xi * xi * xi\n\t + ce[m][11] * eta * eta * eta * eta\n\t + ce[m][12] * zeta * zeta * zeta * zeta;\n\t}\n } #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = nx-1;\n\n for (i = L1; i <= L2; i++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * 
u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) ", "context_chars": 100, "text": "---------*/\n\n L1 = 0;\n L2 = nx-1;\n\n #pragma omp parallel for \n for (i = L1; i <= L2; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n } #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) ", "context_chars": 100, "text": "or firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (j = jst; j <= jend; j++) {\n for (k = 1; k < nz - 1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][1];\n\tu21 = rsd[i][j][k][1] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u21;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n } #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;\n }\n }\n }\n\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = 
ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp * rsd[i-1][j][k][1];\n\tu31im1 = tmp * rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[1][j][k][m] = frct[1][j][k][m]\n\t - dsspm * ( + 5.0 * rsd[1][j][k][m]\n\t\t - 4.0 * rsd[2][j][k][m]\n\t\t + rsd[3][j][k][m] );\n\tfrct[2][j][k][m] = frct[2][j][k][m]\n\t - dsspm * ( - 4.0 * rsd[1][j][k][m]\n\t\t + 6.0 * rsd[2][j][k][m]\n\t\t - 4.0 * rsd[3][j][k][m]\n\t\t + rsd[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n for (i = ist1; i <=iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i-2][j][k][m]\n\t\t\t\t - 4.0 * rsd[i-1][j][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i+1][j][k][m]\n\t\t\t\t + rsd[i+2][j][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[nx-3][j][k][m] = frct[nx-3][j][k][m]\n\t - dsspm * ( rsd[nx-5][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-4][j][k][m]\n\t\t\t\t + 6.0 * rsd[nx-3][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-2][j][k][m] );\n\tfrct[nx-2][j][k][m] = frct[nx-2][j][k][m]\n\t - dsspm * ( rsd[nx-4][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-3][j][k][m]\n\t\t\t\t + 5.0 * rsd[nx-2][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend 
,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) ", "context_chars": 100, "text": "2 * q ) * u21;\n }\n }\n }\n\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp * rsd[i-1][j][k][1];\n\tu31im1 = tmp * rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[1][j][k][m] = frct[1][j][k][m]\n\t - dsspm * ( + 5.0 * rsd[1][j][k][m]\n\t\t - 4.0 * rsd[2][j][k][m]\n\t\t + rsd[3][j][k][m] );\n\tfrct[2][j][k][m] = frct[2][j][k][m]\n\t - dsspm * ( - 4.0 * rsd[1][j][k][m]\n\t\t + 6.0 * rsd[2][j][k][m]\n\t\t - 4.0 * rsd[3][j][k][m]\n\t\t + rsd[4][j][k][m] );\n }\n\n ist1 = 3;\n 
iend1 = nx - 4;\n for (i = ist1; i <=iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i-2][j][k][m]\n\t\t\t\t - 4.0 * rsd[i-1][j][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i+1][j][k][m]\n\t\t\t\t + rsd[i+2][j][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[nx-3][j][k][m] = frct[nx-3][j][k][m]\n\t - dsspm * ( rsd[nx-5][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-4][j][k][m]\n\t\t\t\t + 6.0 * rsd[nx-3][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-2][j][k][m] );\n\tfrct[nx-2][j][k][m] = frct[nx-2][j][k][m]\n\t - dsspm * ( rsd[nx-4][j][k][m]\n\t\t\t\t - 4.0 * rsd[nx-3][j][k][m]\n\t\t\t\t + 5.0 * rsd[nx-2][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) ", "context_chars": 100, "text": " ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (k = 1; k <= nz - 2; k++) {\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) ", "context_chars": 100, "text": ",L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t} #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) ", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21i = tmp * rsd[i][j][k][1];\n\tu31i = tmp * rsd[i][j][k][2];\n\tu41i = tmp * rsd[i][j][k][3];\n\tu51i = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i-1][j][k][0];\n\n\tu21im1 = tmp * rsd[i-1][j][k][1];\n\tu31im1 = tmp * 
rsd[i-1][j][k][2];\n\tu41im1 = tmp * rsd[i-1][j][k][3];\n\tu51im1 = tmp * rsd[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * \n\t ( u21i - u21im1 );\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )\n\t\t - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )\n\t + (1.0/6.0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) ", "context_chars": 100, "text": ".0)\n\t * tx3 * ( u21i*u21i - u21im1*u21im1 )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n for (i = ist; i <= iend; i++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dx1 * tx1 * ( rsd[i-1][j][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + \t rsd[i+1][j][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( rsd[i-1][j][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i+1][j][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( rsd[i-1][j][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i+1][j][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( rsd[i-1][j][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i+1][j][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( rsd[i-1][j][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i+1][j][k][4] );\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = ny-1;\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) ", "context_chars": 100, "text": "------*/\n\n L1 = 0;\n L2 = ny-1;\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) ", "context_chars": 100, "text": "l for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][2];\n\tu31 = rsd[i][j][k][2] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u31;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;\n }\n }\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * 
rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j - u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][1][k][m] = frct[i][1][k][m]\n\t - dsspm * ( + 5.0 * rsd[i][1][k][m]\n\t\t - 4.0 * rsd[i][2][k][m]\n\t\t + rsd[i][3][k][m] );\n\tfrct[i][2][k][m] = frct[i][2][k][m]\n\t - dsspm * ( - 4.0 * rsd[i][1][k][m]\n\t\t + 6.0 * rsd[i][2][k][m]\n\t\t - 4.0 * rsd[i][3][k][m]\n\t\t + rsd[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j-2][k][m]\n\t\t\t\t - 4.0 * rsd[i][j-1][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j+1][k][m]\n\t\t\t\t + rsd[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[i][ny-3][k][m] = frct[i][ny-3][k][m]\n\t - dsspm * ( rsd[i][ny-5][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-4][k][m]\n\t\t\t\t + 6.0 * rsd[i][ny-3][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-2][k][m] );\n\tfrct[i][ny-2][k][m] = frct[i][ny-2][k][m]\n\t - dsspm * ( rsd[i][ny-4][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-3][k][m]\n\t\t\t\t + 5.0 * rsd[i][ny-2][k][m] );\n }\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) ", "context_chars": 100, "text": "2 * q ) * u31;\n }\n }\n }\n\n #pragma omp parallel for \n for (i = 
ist; i <= iend; i++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j - u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][1][k][m] = frct[i][1][k][m]\n\t - dsspm * ( + 5.0 * rsd[i][1][k][m]\n\t\t - 4.0 * rsd[i][2][k][m]\n\t\t + rsd[i][3][k][m] );\n\tfrct[i][2][k][m] = frct[i][2][k][m]\n\t - dsspm * ( - 4.0 * rsd[i][1][k][m]\n\t\t + 6.0 * rsd[i][2][k][m]\n\t\t - 4.0 * rsd[i][3][k][m]\n\t\t + rsd[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j-2][k][m]\n\t\t\t\t - 4.0 
* rsd[i][j-1][k][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j+1][k][m]\n\t\t\t\t + rsd[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[i][ny-3][k][m] = frct[i][ny-3][k][m]\n\t - dsspm * ( rsd[i][ny-5][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-4][k][m]\n\t\t\t\t + 6.0 * rsd[i][ny-3][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-2][k][m] );\n\tfrct[i][ny-2][k][m] = frct[i][ny-2][k][m]\n\t - dsspm * ( rsd[i][ny-4][k][m]\n\t\t\t\t - 4.0 * rsd[i][ny-3][k][m]\n\t\t\t\t + 5.0 * rsd[i][ny-2][k][m] );\n }\n\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) ", "context_chars": 100, "text": " ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (k = 1; k <= nz - 2; k++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) ", "context_chars": 100, "text": ",L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t} #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) ", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21j = tmp * rsd[i][j][k][1];\n\tu31j = tmp * rsd[i][j][k][2];\n\tu41j = tmp * rsd[i][j][k][3];\n\tu51j = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j-1][k][0];\n\n\tu21jm1 = tmp * rsd[i][j-1][k][1];\n\tu31jm1 = tmp * rsd[i][j-1][k][2];\n\tu41jm1 = tmp * rsd[i][j-1][k][3];\n\tu51jm1 = tmp * rsd[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * \n\t ( u31j 
- u31jm1 );\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )\n\t\t - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )\n\t + (1.0/6.0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) ", "context_chars": 100, "text": ".0)\n\t * ty3 * ( u31j*u31j - u31jm1*u31jm1 )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n for (j = jst; j <= jend; j++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dy1 * ty1 * ( rsd[i][j-1][k][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j+1][k][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( rsd[i][j-1][k][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j+1][k][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( rsd[i][j-1][k][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j+1][k][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( rsd[i][j-1][k][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j+1][k][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( rsd[i][j-1][k][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j+1][k][4] );\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "direction flux differences\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) \n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][3];\n\tu41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u41;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u41;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n #pragma omp parallel for firstprivate(nz ,ist 
,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) \n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21k = tmp * rsd[i][j][k][1];\n\tu31k = tmp * rsd[i][j][k][2];\n\tu41k = tmp * rsd[i][j][k][3];\n\tu51k = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j][k-1][0];\n\n\tu21km1 = tmp * rsd[i][j][k-1][1];\n\tu31km1 = tmp * rsd[i][j][k-1][2];\n\tu41km1 = tmp * rsd[i][j][k-1][3];\n\tu51km1 = tmp * rsd[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k \n\t\t\t\t\t - u41km1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )\n\t\t - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )\n\t + (1.0/6.0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dz1 * tz1 * ( rsd[i][j][k+1][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j][k-1][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( rsd[i][j][k+1][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j][k-1][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( rsd[i][j][k+1][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j][k-1][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( rsd[i][j][k+1][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j][k-1][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( rsd[i][j][k+1][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j][k-1][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tfrct[i][j][1][m] = frct[i][j][1][m]\n\t - dsspm * ( + 5.0 * rsd[i][j][1][m]\n\t\t - 4.0 * rsd[i][j][2][m]\n\t\t + rsd[i][j][3][m] );\n\tfrct[i][j][2][m] = frct[i][j][2][m]\n\t - dsspm * (- 4.0 * rsd[i][j][1][m]\n\t\t + 6.0 * rsd[i][j][2][m]\n\t\t - 4.0 * rsd[i][j][3][m]\n\t\t + rsd[i][j][4][m] );\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j][k-2][m]\n\t\t\t\t - 4.0 * rsd[i][j][k-1][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j][k+1][m]\n\t\t\t\t + rsd[i][j][k+2][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tfrct[i][j][nz-3][m] = frct[i][j][nz-3][m]\n\t - dsspm * ( rsd[i][j][nz-5][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-4][m]\n\t\t\t\t + 6.0 * rsd[i][j][nz-3][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-2][m] );\n frct[i][j][nz-2][m] = frct[i][j][nz-2][m]\n\t - dsspm * ( rsd[i][j][nz-4][m]\n\t\t\t\t - 4.0 * rsd[i][j][nz-3][m]\n\t\t\t\t + 5.0 * rsd[i][j][nz-2][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel 
for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) ", "context_chars": 100, "text": "ragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = rsd[i][j][k][3];\n\tu41 = rsd[i][j][k][3] / rsd[i][j][k][0];\n\tq = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]\n\t\t + rsd[i][j][k][2] * rsd[i][j][k][2]\n\t\t + rsd[i][j][k][3] * rsd[i][j][k][3] )\n\t / rsd[i][j][k][0];\n\tflux[i][j][k][1] = rsd[i][j][k][1] * u41;\n\tflux[i][j][k][2] = rsd[i][j][k][2] * u41;\n\tflux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * \n\t ( rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) ", "context_chars": 100, "text": " rsd[i][j][k][4] - q );\n\tflux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) ", "context_chars": 100, "text": "p parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t} #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) ", "context_chars": 100, "text": "][m] = frct[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / rsd[i][j][k][0];\n\n\tu21k = tmp * rsd[i][j][k][1];\n\tu31k = tmp * rsd[i][j][k][2];\n\tu41k = tmp * rsd[i][j][k][3];\n\tu51k = tmp * rsd[i][j][k][4];\n\n\ttmp = 1.0 / rsd[i][j][k-1][0];\n\n\tu21km1 = tmp * rsd[i][j][k-1][1];\n\tu31km1 = tmp * rsd[i][j][k-1][2];\n\tu41km1 = tmp * rsd[i][j][k-1][3];\n\tu51km1 = tmp * rsd[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k \n\t\t\t\t\t - u41km1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )\n\t\t - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )\n\t + (1.0/6.0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 
,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) ", "context_chars": 100, "text": ".0)\n\t * tz3 * ( u41k*u41k - u41km1*u41km1 )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\tfrct[i][j][k][0] = frct[i][j][k][0]\n\t + dz1 * tz1 * ( rsd[i][j][k+1][0]\n\t\t\t\t - 2.0 * rsd[i][j][k][0]\n\t\t\t\t + rsd[i][j][k-1][0] );\n\tfrct[i][j][k][1] = frct[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( rsd[i][j][k+1][1]\n\t\t\t\t - 2.0 * rsd[i][j][k][1]\n\t\t\t\t + rsd[i][j][k-1][1] );\n\tfrct[i][j][k][2] = frct[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( rsd[i][j][k+1][2]\n\t\t\t\t - 2.0 * rsd[i][j][k][2]\n\t\t\t\t + rsd[i][j][k-1][2] );\n\tfrct[i][j][k][3] = frct[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( rsd[i][j][k+1][3]\n\t\t\t\t - 2.0 * rsd[i][j][k][3]\n\t\t\t\t + rsd[i][j][k-1][3] );\n\tfrct[i][j][k][4] = frct[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( rsd[i][j][k+1][4]\n\t\t\t\t - 2.0 * rsd[i][j][k][4]\n\t\t\t\t + rsd[i][j][k-1][4] );\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) ", "context_chars": 100, "text": "rsd[i][j][2][m]\n\t\t - 4.0 * rsd[i][j][3][m]\n\t\t + rsd[i][j][4][m] );\n }\n\n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j][k-2][m]\n\t\t\t\t - 4.0 * rsd[i][j][k-1][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j][k+1][m]\n\t\t\t\t + rsd[i][j][k+2][m] );\n\t}\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) ", "context_chars": 100, "text": " parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n for (k = 3; k <= nz - 4; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t frct[i][j][k][m] = frct[i][j][k][m]\n\t - dsspm * ( rsd[i][j][k-2][m]\n\t\t\t\t - 4.0 * rsd[i][j][k-1][m]\n\t\t\t\t + 6.0 * rsd[i][j][k][m]\n\t\t\t\t - 4.0 * rsd[i][j][k+1][m]\n\t\t\t\t + rsd[i][j][k+2][m] );\n\t} #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "-------------------*/\n int i, j, k, m;\n int iglob, jglob;\n double tmp;\n double u000ijk[5];\n\n for (m = 0; m < 5; m++) {\n errnm[m] = 0.0;\n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ist ,tmp ,m ,jst ,k ,j ,i ) ", "context_chars": 100, "text": "d; j++) {\n jglob = j;\n for (k = 1; k <= nz-2; k++) {\n\texact( iglob, jglob, k, u000ijk );\n\tfor (m = 0; m < 5; m++) {\n\t tmp = ( u000ijk[m] - 
u[i][j][k][m] );\n\t errnm[m] = errnm[m] + tmp *tmp;\n\t} #pragma omp parallel for firstprivate(ist ,tmp ,m ,jst ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,m ) ", "context_chars": 100, "text": "\t tmp = ( u000ijk[m] - u[i][j][k][m] );\n\t errnm[m] = errnm[m] + tmp *tmp;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );\n } #pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(zeta ,eta ,xi ,u000ijk ,m ) ", "context_chars": 100, "text": "i = ((double)i) / (nx0 - 1);\n eta = ((double)j) / (ny0 - 1);\n zeta = ((double)k) / (nz - 1);\n\n for (m = 0; m < 5; m++) {\n u000ijk[m] = ce[m][0]\n + ce[m][1] * xi\n + ce[m][2] * eta\n + ce[m][3] * zeta\n + ce[m][4] * xi * xi\n + ce[m][5] * eta * eta\n + ce[m][6] * zeta * zeta\n + ce[m][7] * xi * xi * xi\n + ce[m][8] * eta * eta * eta\n + ce[m][9] * zeta * zeta * zeta\n + ce[m][10] * xi * xi * xi * xi\n + ce[m][11] * eta * eta * eta * eta\n + ce[m][12] * zeta * zeta * zeta * zeta;\n } #pragma omp parallel for firstprivate(zeta ,eta ,xi ,u000ijk ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tz2 ,ty2 ,tx2 ,jst ,jend ,i ) ", "context_chars": 100, "text": "345 = C1 * C3 * C4 * C5;\n c34 = C3 * C4;\n\n #pragma omp for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n d[i][j][0][0] = 1.0\n\t+ dt * 2.0 * ( tx1 * dx1\n\t\t\t + ty1 * dy1\n\t\t\t + tz1 * dz1 );\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n\n d[i][j][1][0] = dt * 2.0\n\t* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );\n d[i][j][1][1] = 1.0\n\t+ dt * 2.0 \n\t* ( tx1 * r43 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx2\n\t\t\t + ty1 * dy2\n\t\t\t + tz1 * dz2 );\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n\n d[i][j][2][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )\n\t + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * r43 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx3\n\t\t\t+ ty1 * dy3\n\t\t\t+ tz1 * dz3 );\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n\n d[i][j][3][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + 
tz1 * r43 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx4\n\t\t\t+ ty1 * dy4\n\t\t\t+ tz1 * dz4 );\n d[i][j][3][4] = 0.0;\n\n d[i][j][4][0] = dt * 2.0\n\t* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] ) );\n d[i][j][4][1] = dt * 2.0\n\t* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );\n d[i][j][4][2] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]\n\t + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );\n d[i][j][4][3] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );\n d[i][j][4][4] = 1.0\n\t+ dt * 2.0 * ( tx1 * c1345 * tmp1\n\t\t + ty1 * c1345 * tmp1\n\t\t + tz1 * c1345 * tmp1 )\n + dt * 2.0 * ( tx1 * dx5\n\t\t\t+ ty1 * dy5\n\t\t\t+ tz1 * dz5 );\n\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k-1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n a[i][j][0][0] = - dt * tz1 * dz1;\n a[i][j][0][1] = 0.0;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = - dt * tz2;\n a[i][j][0][4] = 0.0;\n\n a[i][j][1][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );\n a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * c34 * tmp1\n\t- dt * tz1 * dz2 ;\n a[i][j][1][2] = 0.0;\n a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );\n a[i][j][1][4] = 0.0;\n\n a[i][j][2][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );\n a[i][j][2][1] = 0.0;\n a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * ( c34 * tmp1 )\n\t- dt * tz1 * dz3;\n a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );\n a[i][j][2][4] = 0.0;\n\n a[i][j][3][0] = - dt * tz2\n\t* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )\n\t + 0.50 * C2\n\t * ( ( u[i][j][k-1][1] * u[i][j][k-1][1]\n\t\t + u[i][j][k-1][2] * u[i][j][k-1][2]\n\t\t + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );\n a[i][j][3][1] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );\n a[i][j][3][2] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );\n a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )\n\t* ( u[i][j][k-1][3] * tmp1 )\n\t- dt * tz1 * ( r43 * c34 * tmp1 )\n\t- dt * tz1 * dz4;\n a[i][j][3][4] = - dt * tz2 * C2;\n\n a[i][j][4][0] = - dt * tz2\n\t* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]\n + u[i][j][k-1][2] * u[i][j][k-1][2]\n + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2\n\t - C1 * ( u[i][j][k-1][4] * tmp1 ) )\n\t * ( u[i][j][k-1][3] * tmp1 ) )\n\t- dt * 
tz1\n\t* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])\n\t - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])\n\t - ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])\n\t - c1345 * tmp2 * u[i][j][k-1][4] );\n a[i][j][4][1] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];\n a[i][j][4][2] = - dt * tz2\n\t* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];\n a[i][j][4][3] = - dt * tz2\n\t* ( C1 * ( u[i][j][k-1][4] * tmp1 )\n - 0.50 * C2\n * ( ( u[i][j][k-1][1]*u[i][j][k-1][1]\n\t\t + u[i][j][k-1][2]*u[i][j][k-1][2]\n\t\t + 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];\n a[i][j][4][4] = - dt * tz2\n\t* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )\n\t- dt * tz1 * c1345 * tmp1\n\t- dt * tz1 * dz5;\n\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j-1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n b[i][j][0][0] = - dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = - dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n\n b[i][j][1][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );\n b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy2;\n b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n\n b[i][j][2][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )\n\t + 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]\n\t\t\t + u[i][j-1][k][2] * u[i][j-1][k][2]\n\t\t\t + u[i][j-1][k][3] * u[i][j-1][k][3] )\n\t\t\t * tmp2 ) )\n\t- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );\n b[i][j][2][1] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );\n b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )\n\t\t\t\t * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1 * ( r43 * c34 * tmp1 )\n\t- dt * ty1 * dy3;\n b[i][j][2][3] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );\n b[i][j][2][4] = - dt * ty2 * C2;\n\n b[i][j][3][0] = - dt * ty2\n\t* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );\n b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n\n b[i][j][4][0] = - dt * ty2\n\t* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]\n\t\t + u[i][j-1][k][2] * u[i][j-1][k][2]\n\t\t + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2\n\t - C1 * ( u[i][j-1][k][4] * tmp1 ) )\n\t * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1\n\t* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))\n\t - ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))\n\t - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))\n\t - c1345*tmp2*u[i][j-1][k][4] );\n b[i][j][4][1] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )\n\t- dt * ty1\n\t* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];\n b[i][j][4][2] = - dt * ty2\n\t* ( C1 * ( u[i][j-1][k][4] * tmp1 )\n\t - 0.50 * C2 \n\t * ( ( u[i][j-1][k][1]*u[i][j-1][k][1]\n + 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]\n\t\t + u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )\n\t- dt * ty1\n\t* ( r43*c34 - c1345 ) * 
tmp2 * u[i][j-1][k][2];\n b[i][j][4][3] = - dt * ty2\n\t* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];\n b[i][j][4][4] = - dt * ty2\n\t* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )\n\t- dt * ty1 * c1345 * tmp1\n\t- dt * ty1 * dy5;\n\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i-1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n c[i][j][0][0] = - dt * tx1 * dx1;\n c[i][j][0][1] = - dt * tx2;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = 0.0;\n c[i][j][0][4] = 0.0;\n\n c[i][j][1][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )\n\t + C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]\n + u[i-1][j][k][2] * u[i-1][j][k][2]\n + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );\n c[i][j][1][1] = - dt * tx2\n\t* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * ( r43 * c34 * tmp1 )\n\t- dt * tx1 * dx2;\n c[i][j][1][2] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );\n c[i][j][1][3] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );\n c[i][j][1][4] = - dt * tx2 * C2;\n\n c[i][j][2][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );\n c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );\n c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx3;\n c[i][j][2][3] = 0.0;\n c[i][j][2][4] = 0.0;\n\n c[i][j][3][0] = - dt * tx2\n\t* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );\n c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );\n c[i][j][3][2] = 0.0;\n c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx4;\n c[i][j][3][4] = 0.0;\n\n c[i][j][4][0] = - dt * tx2\n\t* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]\n\t\t + u[i-1][j][k][2] * u[i-1][j][k][2]\n\t\t + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2\n\t - C1 * ( u[i-1][j][k][4] * tmp1 ) )\n\t * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1\n\t* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )\n\t - c1345 * tmp2 * u[i-1][j][k][4] );\n c[i][j][4][1] = - dt * tx2\n\t* ( C1 * ( u[i-1][j][k][4] * tmp1 )\n\t - 0.50 * C2\n\t * ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]\n\t\t + u[i-1][j][k][2]*u[i-1][j][k][2]\n\t\t + u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )\n\t- dt * tx1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];\n c[i][j][4][2] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];\n c[i][j][4][3] = - dt * tx2\n\t* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];\n c[i][j][4][4] = - dt * tx2\n\t* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * c1345 * tmp1\n\t- dt * tx1 * dx5;\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tz2 ,ty2 ,tx2 ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": 
"#pragma omp parallel for firstprivate(ist ,iend ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tx2 ,ty2 ,tz2 ,jst ,jend ,i ) ", "context_chars": 100, "text": " c34 = C3 * C4;\n\n#if defined(_OPENMP) \n #pragma omp for \n for (i = iend; i >= ist; i--) {\n for (j = jend; j >= jst; j--) {\n#else\t \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\t\n\n/*--------------------------------------------------------------------\nc form the block daigonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n d[i][j][0][0] = 1.0\n\t+ dt * 2.0 * ( tx1 * dx1\n\t\t\t + ty1 * dy1\n\t\t\t + tz1 * dz1 );\n d[i][j][0][1] = 0.0;\n d[i][j][0][2] = 0.0;\n d[i][j][0][3] = 0.0;\n d[i][j][0][4] = 0.0;\n\n d[i][j][1][0] = dt * 2.0\n\t* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );\n d[i][j][1][1] = 1.0\n\t+ dt * 2.0 \n\t* ( tx1 * r43 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx2\n\t\t\t + ty1 * dy2\n\t\t\t + tz1 * dz2 );\n d[i][j][1][2] = 0.0;\n d[i][j][1][3] = 0.0;\n d[i][j][1][4] = 0.0;\n\n d[i][j][2][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )\n\t + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )\n\t + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );\n d[i][j][2][1] = 0.0;\n d[i][j][2][2] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * r43 * c34 * tmp1\n\t + tz1 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx3\n\t\t\t+ ty1 * dy3\n\t\t\t+ tz1 * dz3 );\n d[i][j][2][3] = 0.0;\n d[i][j][2][4] = 0.0;\n\n d[i][j][3][0] = dt * 2.0\n\t* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + ty1 * ( - c34 * tmp2 * u[i][j][k][3] )\n\t + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );\n d[i][j][3][1] = 0.0;\n d[i][j][3][2] = 0.0;\n d[i][j][3][3] = 1.0\n\t+ dt * 2.0\n\t* ( tx1 * c34 * tmp1\n\t + ty1 * c34 * tmp1\n\t + tz1 * r43 * c34 * tmp1 )\n\t+ dt * 2.0 * ( tx1 * dx4\n\t\t\t+ ty1 * dy4\n\t\t\t+ tz1 * dz4 );\n d[i][j][3][4] = 0.0;\n\n d[i][j][4][0] = dt * 2.0\n\t* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] )\n\t + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )\n\t\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )\n\t\t - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )\n\t\t - ( c1345 ) * tmp2 * u[i][j][k][4] ) );\n d[i][j][4][1] = dt * 2.0\n\t* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );\n d[i][j][4][2] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]\n\t + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]\n\t + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );\n d[i][j][4][3] = dt * 2.0\n\t* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]\n\t + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );\n d[i][j][4][4] = 1.0\n + dt * 2.0 * ( tx1 * c1345 * tmp1\n\t\t + ty1 * c1345 * tmp1\n\t\t + tz1 * c1345 * tmp1 )\n + dt * 
2.0 * ( tx1 * dx5\n\t\t\t+ ty1 * dy5\n\t\t\t+ tz1 * dz5 );\n\n/*--------------------------------------------------------------------\nc form the first block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i+1][j][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n a[i][j][0][0] = - dt * tx1 * dx1;\n a[i][j][0][1] = dt * tx2;\n a[i][j][0][2] = 0.0;\n a[i][j][0][3] = 0.0;\n a[i][j][0][4] = 0.0;\n\n a[i][j][1][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )\n\t + C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]\n + u[i+1][j][k][2] * u[i+1][j][k][2]\n + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );\n a[i][j][1][1] = dt * tx2\n\t* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * ( r43 * c34 * tmp1 )\n\t- dt * tx1 * dx2;\n a[i][j][1][2] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );\n a[i][j][1][3] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );\n a[i][j][1][4] = dt * tx2 * C2 ;\n\n a[i][j][2][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );\n a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );\n a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx3;\n a[i][j][2][3] = 0.0;\n a[i][j][2][4] = 0.0;\n\n a[i][j][3][0] = dt * tx2\n\t* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )\n\t- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );\n a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );\n a[i][j][3][2] = 0.0;\n a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )\n\t- dt * tx1 * ( c34 * tmp1 )\n\t- dt * tx1 * dx4;\n a[i][j][3][4] = 0.0;\n\n a[i][j][4][0] = dt * tx2\n\t* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]\n\t\t + u[i+1][j][k][2] * u[i+1][j][k][2]\n\t\t + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2\n\t - C1 * ( u[i+1][j][k][4] * tmp1 ) )\n\t * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1\n\t* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )\n\t - c1345 * tmp2 * u[i+1][j][k][4] );\n a[i][j][4][1] = dt * tx2\n\t* ( C1 * ( u[i+1][j][k][4] * tmp1 )\n\t - 0.50 * C2\n\t * ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]\n\t\t + u[i+1][j][k][2]*u[i+1][j][k][2]\n\t\t + u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )\n\t- dt * tx1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];\n a[i][j][4][2] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];\n a[i][j][4][3] = dt * tx2\n\t* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )\n\t- dt * tx1\n\t* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];\n a[i][j][4][4] = dt * tx2\n\t* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )\n\t- dt * tx1 * c1345 * tmp1\n\t- dt * tx1 * dx5;\n\n/*--------------------------------------------------------------------\nc form the second block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j+1][k][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n b[i][j][0][0] = - dt * ty1 * dy1;\n b[i][j][0][1] = 0.0;\n b[i][j][0][2] = dt * ty2;\n b[i][j][0][3] = 0.0;\n b[i][j][0][4] = 0.0;\n\n b[i][j][1][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );\n b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * 
ty1 * dy2;\n b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );\n b[i][j][1][3] = 0.0;\n b[i][j][1][4] = 0.0;\n\n b[i][j][2][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )\n\t + 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]\n\t\t\t + u[i][j+1][k][2] * u[i][j+1][k][2]\n\t\t\t + u[i][j+1][k][3] * u[i][j+1][k][3] )\n\t\t\t * tmp2 ) )\n\t- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );\n b[i][j][2][1] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );\n b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )\n\t\t\t\t * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1 * ( r43 * c34 * tmp1 )\n\t- dt * ty1 * dy3;\n b[i][j][2][3] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );\n b[i][j][2][4] = dt * ty2 * C2;\n\n b[i][j][3][0] = dt * ty2\n\t* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );\n b[i][j][3][1] = 0.0;\n b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );\n b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )\n\t- dt * ty1 * ( c34 * tmp1 )\n\t- dt * ty1 * dy4;\n b[i][j][3][4] = 0.0;\n\n b[i][j][4][0] = dt * ty2\n\t* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]\n\t\t + u[i][j+1][k][2] * u[i][j+1][k][2]\n\t\t + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2\n\t - C1 * ( u[i][j+1][k][4] * tmp1 ) )\n\t * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1\n\t* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )\n\t - ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )\n\t - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )\n\t - c1345*tmp2*u[i][j+1][k][4] );\n b[i][j][4][1] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )\n\t- dt * ty1\n\t* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];\n b[i][j][4][2] = dt * ty2\n\t* ( C1 * ( u[i][j+1][k][4] * tmp1 )\n\t - 0.50 * C2 \n\t * ( ( u[i][j+1][k][1]*u[i][j+1][k][1]\n\t\t + 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]\n\t\t + u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )\n\t- dt * ty1\n\t* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];\n b[i][j][4][3] = dt * ty2\n\t* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )\n\t- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];\n b[i][j][4][4] = dt * ty2\n\t* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )\n\t- dt * ty1 * c1345 * tmp1\n\t- dt * ty1 * dy5;\n\n/*--------------------------------------------------------------------\nc form the third block sub-diagonal\n--------------------------------------------------------------------*/\n tmp1 = 1.0 / u[i][j][k+1][0];\n tmp2 = tmp1 * tmp1;\n tmp3 = tmp1 * tmp2;\n\n c[i][j][0][0] = - dt * tz1 * dz1;\n c[i][j][0][1] = 0.0;\n c[i][j][0][2] = 0.0;\n c[i][j][0][3] = dt * tz2;\n c[i][j][0][4] = 0.0;\n\n c[i][j][1][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );\n c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * c34 * tmp1\n\t- dt * tz1 * dz2 ;\n c[i][j][1][2] = 0.0;\n c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );\n c[i][j][1][4] = 0.0;\n\n c[i][j][2][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );\n c[i][j][2][1] = 0.0;\n c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * ( c34 * tmp1 )\n\t- dt * tz1 * dz3;\n c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );\n c[i][j][2][4] = 0.0;\n\n c[i][j][3][0] = dt * tz2\n\t* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )\n\t + 0.50 * C2\n\t * ( ( u[i][j][k+1][1] * u[i][j][k+1][1]\n\t\t + u[i][j][k+1][2] * u[i][j][k+1][2]\n\t\t + 
u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );\n c[i][j][3][1] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );\n c[i][j][3][2] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );\n c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )\n\t* ( u[i][j][k+1][3] * tmp1 )\n\t- dt * tz1 * ( r43 * c34 * tmp1 )\n\t- dt * tz1 * dz4;\n c[i][j][3][4] = dt * tz2 * C2;\n\n c[i][j][4][0] = dt * tz2\n\t* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]\n + u[i][j][k+1][2] * u[i][j][k+1][2]\n + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2\n\t - C1 * ( u[i][j][k+1][4] * tmp1 ) )\n\t * ( u[i][j][k+1][3] * tmp1 ) )\n\t- dt * tz1\n\t* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )\n\t - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )\n\t - ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )\n\t - c1345 * tmp2 * u[i][j][k+1][4] );\n c[i][j][4][1] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];\n c[i][j][4][2] = dt * tz2\n\t* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )\n\t- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];\n c[i][j][4][3] = dt * tz2\n\t* ( C1 * ( u[i][j][k+1][4] * tmp1 )\n - 0.50 * C2\n * ( ( u[i][j][k+1][1]*u[i][j][k+1][1]\n\t\t + u[i][j][k+1][2]*u[i][j][k+1][2]\n\t\t + 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )\n\t- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];\n c[i][j][4][4] = dt * tz2\n\t* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )\n\t- dt * tz1 * c1345 * tmp1\n\t- dt * tz1 * dz5;\n }\n }\n} #pragma omp parallel for firstprivate(ist ,iend ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tx2 ,ty2 ,tz2 ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(sum ,m ) ", "context_chars": 100, "text": "-----------------*/\n int i, j, k, m;\n double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;\n\n for (m = 0; m < 5; m++) {\n sum[m] = 0.0;\n } #pragma omp parallel for firstprivate(sum ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(sum ,m ) \n for (m = 0; m < 5; m++) {\n sum[m] = 0.0;\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) 
reduction(+:sum0) ", "context_chars": 100, "text": " 5; m++) {\n sum[m] = 0.0;\n }\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n }\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) ", "context_chars": 100, "text": "3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz0-2; k++) {\n\t sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];\n\t sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];\n\t sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];\n\t sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];\n\t sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,sum ,m ) ", "context_chars": 100, "text": "0;\n sum[1] += sum1;\n sum[2] += sum2;\n sum[3] += sum3;\n sum[4] += sum4;\n }\n \n for (m = 0; m < 5; m++) {\n sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );\n } #pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,sum ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for firstprivate(k ,i ) \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "----------*/\n #pragma omp parallel for firstprivate(k ,i ) \n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) ", "context_chars": 100, "text": ") \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n 
for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) \n for (j = jbeg; j <= jfin; j++) {\n jglob = j;\n\n k = ki1;\n\n phi1[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n\n k = ki2;\n\n phi2[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n }\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) ", "context_chars": 100, "text": "ate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) \n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (j = jbeg; j <= jfin; j++) {\n jglob = j;\n\n k = ki1;\n\n phi1[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n\n k = ki2;\n\n phi2[i][j] = C2*( u[i][j][k][4]\n\t\t\t- 0.50 * ( pow2(u[i][j][k][1])\n\t\t\t\t + pow2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) ", "context_chars": 100, "text": "w2(u[i][j][k][2])\n\t\t\t\t + pow2(u[i][j][k][3]) )\n\t\t\t/ u[i][j][k][0] );\n }\n }\n\n frc1 = 0.0;\n\n for (i = ibeg; i <= ifin1; i++) {\n #pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) \n for (j = jbeg; j <= jfin1; j++) {\n frc1 = frc1 + ( phi1[i][j]\n\t\t + phi1[i+1][j]\n\t\t + phi1[i][j+1]\n\t\t + phi1[i+1][j+1]\n\t\t + phi2[i][j]\n\t\t + phi2[i+1][j]\n\t\t + phi2[i][j+1]\n\t\t + phi2[i+1][j+1] );\n }\n } #pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) ", "context_chars": 100, "text": "private(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) \n for (i = ibeg; i <= ifin1; i++) {\n for (j = jbeg; j <= jfin1; j++) {\n frc1 = frc1 + ( phi1[i][j]\n\t\t + phi1[i+1][j]\n\t\t + phi1[i][j+1]\n\t\t + phi1[i+1][j+1]\n\t\t + phi2[i][j]\n\t\t + phi2[i+1][j]\n\t\t + phi2[i][j+1]\n\t\t + phi2[i+1][j+1] );\n } #pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ,i ) reduction(+:frc1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for firstprivate(k ,i ) \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "----------*/\n #pragma omp parallel for firstprivate(k ,i ) \n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) ", "context_chars": 100, "text": " phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n }\n jglob = jbeg;\n if (jglob == ji1) {\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) \n for (k = ki1; k <= ki2; k++) {\n\tphi1[i][k] = C2*( u[i][jbeg][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jbeg][k][1])\n\t\t\t\t + pow2(u[i][jbeg][k][2])\n\t\t\t\t + pow2(u[i][jbeg][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n }\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) ", "context_chars": 100, "text": "ate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) \n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (k = ki1; k <= ki2; k++) {\n\tphi1[i][k] = C2*( u[i][jbeg][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jbeg][k][1])\n\t\t\t\t + pow2(u[i][jbeg][k][2])\n\t\t\t\t + pow2(u[i][jbeg][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) ", "context_chars": 100, "text": "g][k][3]) )\n\t\t\t / u[i][jbeg][k][0] );\n }\n }\n }\n\n jglob = jfin;\n if (jglob == ji2) {\n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) \n for (k = ki1; k <= ki2; k++) {\n\tphi2[i][k] = C2*( u[i][jfin][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jfin][k][1])\n\t\t\t\t + pow2(u[i][jfin][k][2])\n\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n }\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) ", "context_chars": 100, "text": "ate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) \n for (i = ibeg; i <= ifin; i++) {\n iglob = i;\n for (k = ki1; k <= ki2; k++) {\n\tphi2[i][k] = C2*( u[i][jfin][k][4]\n\t\t\t - 0.50 * ( pow2(u[i][jfin][k][1])\n\t\t\t\t + pow2(u[i][jfin][k][2])\n\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n } #pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) ", "context_chars": 100, "text": 
"\t\t\t\t + pow2(u[i][jfin][k][3]) )\n\t\t\t / u[i][jfin][k][0] );\n }\n }\n }\n\n\n frc2 = 0.0;\n for (i = ibeg; i <= ifin1; i++) {\n #pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) \n for (k = ki1; k <= ki2-1; k++) {\n frc2 = frc2 + ( phi1[i][k]\n\t\t + phi1[i+1][k]\n\t\t + phi1[i][k+1]\n\t\t + phi1[i+1][k+1]\n\t\t + phi2[i][k]\n\t\t + phi2[i+1][k]\n\t\t + phi2[i][k+1]\n\t\t + phi2[i+1][k+1] );\n }\n } #pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) ", "context_chars": 100, "text": "rstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) \n for (i = ibeg; i <= ifin1; i++) {\n for (k = ki1; k <= ki2-1; k++) {\n frc2 = frc2 + ( phi1[i][k]\n\t\t + phi1[i+1][k]\n\t\t + phi1[i][k+1]\n\t\t + phi1[i+1][k+1]\n\t\t + phi2[i][k]\n\t\t + phi2[i+1][k]\n\t\t + phi2[i][k+1]\n\t\t + phi2[i+1][k+1] );\n } #pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2 ,i ) reduction(+:frc2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "-----------\nc initialize\n--------------------------------------------------------------------*/\n for (i = 0; i <= ISIZ2+1; i++) {\n #pragma omp parallel for firstprivate(k ,i ) \n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n }\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,i ) ", "context_chars": 100, "text": "----------*/\n #pragma omp parallel for firstprivate(k ,i ) \n for (i = 0; i <= ISIZ2+1; i++) {\n for (k = 0; k <= ISIZ3+1; k++) {\n phi1[i][k] = 0.0;\n phi2[i][k] = 0.0;\n } #pragma omp parallel for firstprivate(k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "im1, u51im1;\n double u21jm1, u31jm1, u41jm1, u51jm1;\n double u21km1, u31km1, u41km1, u51km1;\n\n for (i = 0; i <= nx-1; i++) {\n #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n for (j = 0; j <= ny-1; j++) {\n #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n for (k = 0; k <= nz-1; k++) {\n\t#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) ", "context_chars": 100, "text": "e u21km1, u31km1, u41km1, u51km1;\n\n #pragma omp parallel for \n for (i = 0; i <= nx-1; i++) {\n for (j = 0; j <= ny-1; j++) {\n #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n for (k = 0; k <= nz-1; k++) {\n\t#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - 
frct[i][j][k][m];\n\t}\n }\n } #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) ", "context_chars": 100, "text": "gma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n for (j = 0; j <= ny-1; j++) {\n for (k = 0; k <= nz-1; k++) {\n\t#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t}\n } #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) \n for (k = 0; k <= nz-1; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = - frct[i][j][k][m];\n\t} #pragma omp parallel for firstprivate(nx ,j ,k ,m ,nz ,ny ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = nx-1;\n\n for (i = L1; i <= L2; i++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } \n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) ", "context_chars": 100, "text": "---------*/\n\n L1 = 0;\n L2 = nx-1;\n\n #pragma omp parallel for \n for (i = L1; i <= L2; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) ", "context_chars": 100, 
"text": "or firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][1];\n\tu21 = u[i][j][k][1] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * \n\t ( u[i][j][k][4] - q );\n\tflux[i][j][k][2] = u[i][j][k][2] * u21;\n\tflux[i][j][k][3] = u[i][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n } #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "][j][k][3] * u21;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;\n }\n } \n } \n\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] 
);\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[1][j][k][m] = rsd[1][j][k][m]\n\t - dssp * ( + 5.0 * u[1][j][k][m]\n\t\t - 4.0 * u[2][j][k][m]\n\t\t + u[3][j][k][m] );\n\trsd[2][j][k][m] = rsd[2][j][k][m]\n\t - dssp * ( - 4.0 * u[1][j][k][m]\n\t\t + 6.0 * u[2][j][k][m]\n\t\t - 4.0 * u[3][j][k][m]\n\t\t + u[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n\n for (i = ist1; i <= iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i-2][j][k][m]\n\t\t\t\t - 4.0 * u[i-1][j][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i+1][j][k][m]\n\t\t\t\t + u[i+2][j][k][m] );\n\t}\n }\n\n\n for (m = 0; m < 5; m++) {\n\trsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]\n\t - dssp * ( u[nx-5][j][k][m]\n\t\t\t\t - 4.0 * u[nx-4][j][k][m]\n\t\t\t\t + 6.0 * u[nx-3][j][k][m]\n\t\t\t\t - 4.0 * u[nx-2][j][k][m] );\n\trsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]\n\t - dssp * ( u[nx-4][j][k][m]\n\t\t\t\t - 4.0 * u[nx-3][j][k][m]\n\t\t\t\t + 5.0 * u[nx-2][j][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) ", "context_chars": 100, "text": "* q ) * u21;\n }\n } \n } \n\n #pragma omp parallel for \n for (j = jst; j <= jend; j++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) 
)\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n }\n\n/*--------------------------------------------------------------------\nc Fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[1][j][k][m] = rsd[1][j][k][m]\n\t - dssp * ( + 5.0 * u[1][j][k][m]\n\t\t - 4.0 * u[2][j][k][m]\n\t\t + u[3][j][k][m] );\n\trsd[2][j][k][m] = rsd[2][j][k][m]\n\t - dssp * ( - 4.0 * u[1][j][k][m]\n\t\t + 6.0 * u[2][j][k][m]\n\t\t - 4.0 * u[3][j][k][m]\n\t\t + u[4][j][k][m] );\n }\n\n ist1 = 3;\n iend1 = nx - 4;\n\n for (i = ist1; i <= iend1; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i-2][j][k][m]\n\t\t\t\t - 4.0 * u[i-1][j][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i+1][j][k][m]\n\t\t\t\t + u[i+2][j][k][m] );\n\t}\n }\n\n\n for (m = 0; m < 5; m++) {\n\trsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]\n\t - dssp * ( u[nx-5][j][k][m]\n\t\t\t\t - 4.0 * u[nx-4][j][k][m]\n\t\t\t\t + 6.0 * u[nx-3][j][k][m]\n\t\t\t\t - 4.0 * u[nx-2][j][k][m] );\n\trsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]\n\t - dssp * ( u[nx-4][j][k][m]\n\t\t\t\t - 4.0 * u[nx-3][j][k][m]\n\t\t\t\t + 5.0 * u[nx-2][j][k][m] );\n }\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) ", "context_chars": 100, "text": "tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (k = 1; k <= nz - 2; k++) {\n for (i = ist; i <= iend; i++) {\n\t#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i 
,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) ", "context_chars": 100, "text": "d ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) \n for (i = ist; i <= iend; i++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t} #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) ", "context_chars": 100, "text": "k][m]\n\t - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );\n\t}\n }\n\n L2 = nx-1;\n\n for (i = ist; i <= L2; i++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21i = tmp * u[i][j][k][1];\n\tu31i = tmp * u[i][j][k][2];\n\tu41i = tmp * u[i][j][k][3];\n\tu51i = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i-1][j][k][0];\n\n\tu21im1 = tmp * u[i-1][j][k][1];\n\tu31im1 = tmp * u[i-1][j][k][2];\n\tu41im1 = tmp * u[i-1][j][k][3];\n\tu51im1 = tmp * u[i-1][j][k][4];\n\n\tflux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);\n\tflux[i][j][k][2] = tx3 * ( u31i - u31im1 );\n\tflux[i][j][k][3] = tx3 * ( u41i - u41im1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )\n\t\t - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )\n\t + (1.0/6.0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) ", "context_chars": 100, "text": ".0)\n\t * tx3 * ( pow2(u21i) - pow2(u21im1) )\n\t + C1 * C5 * tx3 * ( u51i - u51im1 );\n }\n\n for (i = ist; i <= iend; i++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dx1 * tx1 * ( u[i-1][j][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i+1][j][k][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )\n\t + dx2 * tx1 * ( u[i-1][j][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i+1][j][k][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )\n\t + dx3 * tx1 * ( u[i-1][j][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i+1][j][k][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )\n\t + dx4 * tx1 * ( u[i-1][j][k][3]\n\t\t\t\t - 2.0 * 
u[i][j][k][3]\n\t\t\t\t + u[i+1][j][k][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )\n\t + dx5 * tx1 * ( u[i-1][j][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i+1][j][k][4] );\n } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,nx ,nz ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s\n--------------------------------------------------------------------*/\n\n L1 = 0;\n L2 = ny-1;\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) ", "context_chars": 100, "text": "------*/\n\n L1 = 0;\n L2 = ny-1;\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = L1; j <= L2; j++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) ", "context_chars": 100, "text": "l for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) \n for (j = L1; j <= L2; j++) {\n for (k = 1; k <= nz - 2; k++) {\n\tflux[i][j][k][0] = u[i][j][k][2];\n\tu31 = u[i][j][k][2] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u31;\n\tflux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][3] = u[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "[i][j][k][3] * u31;\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;\n }\n }\n }\n\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] );\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][1][k][m] = rsd[i][1][k][m]\n\t - dssp * ( + 5.0 * 
u[i][1][k][m]\n\t\t - 4.0 * u[i][2][k][m]\n\t\t + u[i][3][k][m] );\n\trsd[i][2][k][m] = rsd[i][2][k][m]\n\t - dssp * ( - 4.0 * u[i][1][k][m]\n\t\t + 6.0 * u[i][2][k][m]\n\t\t - 4.0 * u[i][3][k][m]\n\t\t + u[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j-2][k][m]\n\t\t\t\t - 4.0 * u[i][j-1][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j+1][k][m]\n\t\t\t\t + u[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]\n\t - dssp * ( u[i][ny-5][k][m]\n\t\t\t\t - 4.0 * u[i][ny-4][k][m]\n\t\t\t\t + 6.0 * u[i][ny-3][k][m]\n\t\t\t\t - 4.0 * u[i][ny-2][k][m] );\n\trsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]\n\t - dssp * ( u[i][ny-4][k][m]\n\t\t\t\t - 4.0 * u[i][ny-3][k][m]\n\t\t\t\t + 5.0 * u[i][ny-2][k][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) ", "context_chars": 100, "text": "2 * q ) * u31;\n }\n }\n }\n\n #pragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (k = 1; k <= nz - 2; k++) {\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] 
);\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][1][k][m] = rsd[i][1][k][m]\n\t - dssp * ( + 5.0 * u[i][1][k][m]\n\t\t - 4.0 * u[i][2][k][m]\n\t\t + u[i][3][k][m] );\n\trsd[i][2][k][m] = rsd[i][2][k][m]\n\t - dssp * ( - 4.0 * u[i][1][k][m]\n\t\t + 6.0 * u[i][2][k][m]\n\t\t - 4.0 * u[i][3][k][m]\n\t\t + u[i][4][k][m] );\n }\n\n jst1 = 3;\n jend1 = ny - 4;\n for (j = jst1; j <= jend1; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j-2][k][m]\n\t\t\t\t - 4.0 * u[i][j-1][k][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j+1][k][m]\n\t\t\t\t + u[i][j+2][k][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]\n\t - dssp * ( u[i][ny-5][k][m]\n\t\t\t\t - 4.0 * u[i][ny-4][k][m]\n\t\t\t\t + 6.0 * u[i][ny-3][k][m]\n\t\t\t\t - 4.0 * u[i][ny-2][k][m] );\n\trsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]\n\t - dssp * ( u[i][ny-4][k][m]\n\t\t\t\t - 4.0 * u[i][ny-3][k][m]\n\t\t\t\t + 5.0 * u[i][ny-2][k][m] );\n }\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) ", "context_chars": 100, "text": "ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (k = 1; k <= nz - 2; k++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) ", "context_chars": 100, "text": "d ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) \n for (j = jst; j <= jend; j++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - ty2 
* ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t} #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) ", "context_chars": 100, "text": "[k][m]\n\t - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );\n\t}\n }\n\n L2 = ny-1;\n for (j = jst; j <= L2; j++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21j = tmp * u[i][j][k][1];\n\tu31j = tmp * u[i][j][k][2];\n\tu41j = tmp * u[i][j][k][3];\n\tu51j = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j-1][k][0];\n\tu21jm1 = tmp * u[i][j-1][k][1];\n\tu31jm1 = tmp * u[i][j-1][k][2];\n\tu41jm1 = tmp * u[i][j-1][k][3];\n\tu51jm1 = tmp * u[i][j-1][k][4];\n\n\tflux[i][j][k][1] = ty3 * ( u21j - u21jm1 );\n\tflux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);\n\tflux[i][j][k][3] = ty3 * ( u41j - u41jm1 );\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )\n\t\t - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )\n\t + (1.0/6.0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) ", "context_chars": 100, "text": ".0)\n\t * ty3 * ( pow2(u31j) - pow2(u31jm1) )\n\t + C1 * C5 * ty3 * ( u51j - u51jm1 );\n }\n\n for (j = jst; j <= jend; j++) {\n\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dy1 * ty1 * ( u[i][j-1][k][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j+1][k][0] );\n\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )\n\t + dy2 * ty1 * ( u[i][j-1][k][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j+1][k][1] );\n\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )\n\t + dy3 * ty1 * ( u[i][j-1][k][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j+1][k][2] );\n\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )\n\t + dy4 * ty1 * ( u[i][j-1][k][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j+1][k][3] );\n\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )\n\t + dy5 * ty1 * ( u[i][j-1][k][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j+1][k][4] );\n\n } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": 
"direction flux differences\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) \n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = u[i][j][k][3];\n\tu41 = u[i][j][k][3] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u41;\n\tflux[i][j][k][2] = u[i][j][k][2] * u41; \n\tflux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) \n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21k = tmp * u[i][j][k][1];\n\tu31k = tmp * u[i][j][k][2];\n\tu41k = tmp * u[i][j][k][3];\n\tu51k = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j][k-1][0];\n\n\tu21km1 = tmp * u[i][j][k-1][1];\n\tu31km1 = tmp * u[i][j][k-1][2];\n\tu41km1 = tmp * u[i][j][k-1][3];\n\tu51km1 = tmp * u[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )\n\t\t - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )\n\t + (1.0/6.0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dz1 * tz1 * ( u[i][j][k-1][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j][k+1][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( u[i][j][k-1][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j][k+1][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( u[i][j][k-1][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j][k+1][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( u[i][j][k-1][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j][k+1][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( u[i][j][k-1][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j][k+1][4] );\n }\n\n/*--------------------------------------------------------------------\nc fourth-order dissipation\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\trsd[i][j][1][m] = rsd[i][j][1][m]\n\t - dssp * ( + 5.0 * u[i][j][1][m]\n\t\t - 4.0 * u[i][j][2][m]\n\t\t + u[i][j][3][m] );\n\trsd[i][j][2][m] = rsd[i][j][2][m]\n\t - dssp * ( - 4.0 * u[i][j][1][m]\n\t\t + 6.0 * u[i][j][2][m]\n\t\t - 4.0 * u[i][j][3][m]\n\t\t + u[i][j][4][m] );\n }\n\n #pragma omp parallel for firstprivate(nz 
,ist ,jst ,m ,dssp ,k ,j ,i ) \n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\trsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]\n\t - dssp * ( u[i][j][nz-5][m]\n\t\t\t\t - 4.0 * u[i][j][nz-4][m]\n\t\t\t\t + 6.0 * u[i][j][nz-3][m]\n\t\t\t\t - 4.0 * u[i][j][nz-2][m] );\n\trsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]\n\t - dssp * ( u[i][j][nz-4][m]\n\t\t\t\t - 4.0 * u[i][j][nz-3][m]\n\t\t\t\t + 5.0 * u[i][j][nz-2][m] );\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) ", "context_chars": 100, "text": "ragma omp parallel for \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n for (k = 0; k <= nz-1; k++) {\n\tflux[i][j][k][0] = u[i][j][k][3];\n\tu41 = u[i][j][k][3] / u[i][j][k][0];\n\n\tq = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]\n\t\t + u[i][j][k][2] * u[i][j][k][2]\n\t\t + u[i][j][k][3] * u[i][j][k][3] )\n\t / u[i][j][k][0];\n\n\tflux[i][j][k][1] = u[i][j][k][1] * u41;\n\tflux[i][j][k][2] = u[i][j][k][2] * u41; \n\tflux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) ", "context_chars": 100, "text": "+ C2 * (u[i][j][k][4]-q);\n\tflux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) ", "context_chars": 100, "text": "p parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) \n for (k = 1; k <= nz - 2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t} #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) ", "context_chars": 100, "text": "][m] = rsd[i][j][k][m]\n\t - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );\n\t}\n }\n\n for (k = 1; k <= nz-1; k++) {\n\ttmp = 1.0 / u[i][j][k][0];\n\n\tu21k = tmp * u[i][j][k][1];\n\tu31k = tmp * u[i][j][k][2];\n\tu41k = tmp * u[i][j][k][3];\n\tu51k = tmp * u[i][j][k][4];\n\n\ttmp = 1.0 / u[i][j][k-1][0];\n\n\tu21km1 = tmp * 
u[i][j][k-1][1];\n\tu31km1 = tmp * u[i][j][k-1][2];\n\tu41km1 = tmp * u[i][j][k-1][3];\n\tu51km1 = tmp * u[i][j][k-1][4];\n\n\tflux[i][j][k][1] = tz3 * ( u21k - u21km1 );\n\tflux[i][j][k][2] = tz3 * ( u31k - u31km1 );\n\tflux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);\n\tflux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )\n\t * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )\n\t\t - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )\n\t + (1.0/6.0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) ", "context_chars": 100, "text": ".0)\n\t * tz3 * ( pow2(u41k) - pow2(u41km1) )\n\t + C1 * C5 * tz3 * ( u51k - u51km1 );\n }\n\n for (k = 1; k <= nz - 2; k++) {\n\trsd[i][j][k][0] = rsd[i][j][k][0]\n\t + dz1 * tz1 * ( u[i][j][k-1][0]\n\t\t\t\t - 2.0 * u[i][j][k][0]\n\t\t\t\t + u[i][j][k+1][0] );\n\trsd[i][j][k][1] = rsd[i][j][k][1]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )\n\t + dz2 * tz1 * ( u[i][j][k-1][1]\n\t\t\t\t - 2.0 * u[i][j][k][1]\n\t\t\t\t + u[i][j][k+1][1] );\n\trsd[i][j][k][2] = rsd[i][j][k][2]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )\n\t + dz3 * tz1 * ( u[i][j][k-1][2]\n\t\t\t\t - 2.0 * u[i][j][k][2]\n\t\t\t\t + u[i][j][k+1][2] );\n\trsd[i][j][k][3] = rsd[i][j][k][3]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )\n\t + dz4 * tz1 * ( u[i][j][k-1][3]\n\t\t\t\t - 2.0 * u[i][j][k][3]\n\t\t\t\t + u[i][j][k+1][3] );\n\trsd[i][j][k][4] = rsd[i][j][k][4]\n\t + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )\n\t + dz5 * tz1 * ( u[i][j][k-1][4]\n\t\t\t\t - 2.0 * u[i][j][k][4]\n\t\t\t\t + u[i][j][k+1][4] );\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) ", "context_chars": 100, "text": "6.0 * u[i][j][2][m]\n\t\t - 4.0 * u[i][j][3][m]\n\t\t + u[i][j][4][m] );\n }\n\n for (k = 3; k <= nz - 4; k++) {\n\t#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t}\n } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) ", "context_chars": 100, "text": " parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) \n for (k = 3; k <= nz - 4; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = rsd[i][j][k][m]\n\t - dssp * ( u[i][j][k-2][m]\n\t\t\t\t - 4.0 * u[i][j][k-1][m]\n\t\t\t\t + 6.0 * u[i][j][k][m]\n\t\t\t\t - 4.0 * u[i][j][k+1][m]\n\t\t\t\t + u[i][j][k+2][m] );\n\t} #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "g the top and bottom faces\n--------------------------------------------------------------------*/\n for (i = 0; i < nx; i++) {\n iglob = i;\n for (j = 0; j < ny; j++) {\n jglob = j;\n exact( iglob, jglob, 0, &u[i][j][0][0] );\n exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "long north and south faces\n--------------------------------------------------------------------*/\n for (i = 0; i < nx; i++) {\n iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, 0, k, &u[i][0][k][0] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, 0, k, &u[i][0][k][0] );\n }\n }\n\n for (i = 0; i < nx; i++) {\n iglob = i;\n for (k = 0; k < nz; k++) {\n exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " along east and west faces\n--------------------------------------------------------------------*/\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 0; k < nz; k++) {\n exact( 0, jglob, k, &u[0][j][k][0] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " jglob = j;\n for (k = 0; k < nz; k++) {\n exact( 0, jglob, k, &u[0][j][k][0] );\n }\n }\n\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 0; k < nz; k++) {\n exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "i, peta, pzeta;\n double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],\n ue_iny0k[5],ue_ij1[5],ue_ijnz[5];\n\n for (j = 0; j < ny; j++) {\n jglob = j;\n for (k = 1; k < nz - 1; k++) {\n zeta = ((double)k) / (nz-1);\n if (jglob != 0 && jglob != ny0-1) {\n\teta = ( (double) (jglob) ) / (ny0-1);\n\tfor (i = 0; i < nx; i++) {\n\t iglob = i;\n\t if(iglob != 0 && iglob != nx0-1) {\n\t xi = ( (double) (iglob) ) / (nx0-1);\n\t exact (0,jglob,k,ue_1jk);\n\t exact (nx0-1,jglob,k,ue_nx0jk);\n\t exact (iglob,0,k,ue_i1k);\n\t exact (iglob,ny0-1,k,ue_iny0k);\n\t exact (iglob,jglob,0,ue_ij1);\n\t exact (iglob,jglob,nz-1,ue_ijnz);\n\t #pragma omp parallel for firstprivate(pxi ,peta ,pzeta ,xi ,eta ,zeta ,m ,i ,k ,j ) \n\t for (m = 0; m < 5; m++) {\n\t pxi = ( 1.0 - xi ) * ue_1jk[m]\n\t\t+ xi * ue_nx0jk[m];\n\t peta = ( 1.0 - eta ) * ue_i1k[m]\n\t\t+ eta * ue_iny0k[m];\n\t pzeta = ( 1.0 - zeta ) * ue_ij1[m]\n\t\t+ zeta * ue_ijnz[m];\n\n\t u[i][j][k][m] = pxi + peta + pzeta\n\t\t- pxi * peta - peta * pzeta - 
pzeta * pxi\n\t\t+ pxi * peta * pzeta;\n\t }\n\t }\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(pxi ,peta ,pzeta ,xi ,eta ,zeta ,m ,i ,k ,j ) ", "context_chars": 100, "text": "b,ny0-1,k,ue_iny0k);\n\t exact (iglob,jglob,0,ue_ij1);\n\t exact (iglob,jglob,nz-1,ue_ijnz);\n\t for (m = 0; m < 5; m++) {\n\t pxi = ( 1.0 - xi ) * ue_1jk[m]\n\t\t+ xi * ue_nx0jk[m];\n\t peta = ( 1.0 - eta ) * ue_i1k[m]\n\t\t+ eta * ue_iny0k[m];\n\t pzeta = ( 1.0 - zeta ) * ue_ij1[m]\n\t\t+ zeta * ue_ijnz[m];\n\n\t u[i][j][k][m] = pxi + peta + pzeta\n\t\t- pxi * peta - peta * pzeta - pzeta * pxi\n\t\t+ pxi * peta * pzeta;\n\t } #pragma omp parallel for firstprivate(pxi ,peta ,pzeta ,xi ,eta ,zeta ,m ,i ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "e, before timestepping).\n--------------------------------------------------------------------*/\n{\n for (i = 0; i < ISIZ1; i++) {\n #pragma omp parallel for private(istep ,i ,j ,k ,m ) \n for (j = 0; j < ISIZ2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(istep ,i ,j ,k ,m ) ", "context_chars": 100, "text": "-----*/\n{\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (i = 0; i < ISIZ1; i++) {\n for (j = 0; j < ISIZ2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n }\n } #pragma omp parallel for private(istep ,i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "{\n #pragma omp parallel for private(istep ,i ,j ,k ,m ) \n for (j = 0; j < ISIZ2; j++) {\n for (k = 0; k < 5; k++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "; j++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (k = 0; k < 5; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t a[i][j][k][m] = 0.0;\n\t b[i][j][k][m] = 0.0;\n\t c[i][j][k][m] = 0.0;\n\t d[i][j][k][m] = 0.0;\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) ", "context_chars": 100, "text": " perform SSOR iteration\n--------------------------------------------------------------------*/\n for (i = ist; i <= iend; i++) {\n #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for private(istep ,i ,j ,k ,m ) \n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t}\n }\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) ", "context_chars": 100, "text": "rivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n for (i = ist; i <= iend; i++) {\n for (j = jst; j <= jend; j++) {\n\t#pragma omp parallel for private(istep ,i ,j ,k ,m ) \n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t}\n } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for private(istep ,i ,j ,k ,m ) ", "context_chars": 100, "text": "stprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n for (j = jst; j <= jend; j++) {\n\tfor (k = 1; k <= nz - 2; k++) {\n\t #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) \n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t }\n\t} #pragma omp parallel for private(istep ,i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) ", "context_chars": 100, "text": "; j++) {\n\t#pragma omp parallel for private(istep ,i ,j ,k ,m ) \n\tfor (k = 1; k <= nz - 2; k++) {\n\t for (m = 0; m < 5; m++) {\n\t rsd[i][j][k][m] = dt * rsd[i][j][k][m];\n\t } #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "------------------------------------*/\n epsilon = 1.0e-08;\n\n *class = 'U';\n *verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/LU/lu.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(xcr ,xce ,m ) ", "context_chars": 100, "text": "he known reference 
values.\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for firstprivate(xcr ,xce ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ) ", "context_chars": 100, "text": "space\nc-------------------------------------------------------------------*/\n\n int i, j, k;\n\n for (k = 0; k < d[2]; k++) {\n\tfor (j = 0; j < d[1]; j++) {\n for (i = 0; i < d[0]; i++) {\n\t crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);\n\t }\n\t}\n } #pragma omp parallel for private(i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "006 format(' WARNING: compiled for ', i5, ' processes. ',\n > ' Will not verify. ')*/\n\n for (i = 0;i < 3 ; i++) {\n\tdims[i][0] = NX;\n\tdims[i][1] = NY;\n\tdims[i][2] = NZ;\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": " ) \n for (i = 0;i < 3 ; i++) {\n\tdims[i][0] = NX;\n\tdims[i][1] = NY;\n\tdims[i][2] = NZ;\n }\n\n\n for (i = 0; i < 3; i++) {\n\txstart[i] = 1;\n\txend[i] = NX;\n\tystart[i] = 1;\n yend[i] = NY;\n zstart[i] = 1;\n zend[i] = NZ;\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) ", "context_chars": 100, "text": "c mod(i-1+n/2, n) - n/2\nc-------------------------------------------------------------------*/\n\n for (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\t#pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) \n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) \n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) ", "context_chars": 100, "text": "k ) \n for (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) \n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t} #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) ", "context_chars": 100, "text": " j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t } #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,i ) ", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, jj;\n\n for (i = 0; i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for firstprivate(d ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,jj ) ", "context_chars": 100, "text": "{\n\tlogd[i] = ilog2(d[i]);\n }\n\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (k = 0; k < d[2]; k++) {\n\tfor (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\t#pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) \n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t}\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n \n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n cfftz (is, logd[0],\n\t\t d[0], y0, y1);\n\t \n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\t\t xout[k][j+jj][i].real = y0[i][j].real;\n\t\t xout[k][j+jj][i].imag = y0[i][j].imag;\n\t\t}\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,jj ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) ", "context_chars": 100, "text": "ock; jj+=fftblock) {\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < fftblock; j++) {\n\t\t#pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) \n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t}\n\t } #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) ", "context_chars": 100, "text": "arallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) \n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\n\t\t y0[i][j].real = x[k][j+jj][i].real;\n\n\t\t y0[i][j].imag = x[k][j+jj][i].imag;\n\t\t} #pragma omp parallel for firstprivate(fftblock ,i 
,jj ,x ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) ", "context_chars": 100, "text": "r_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < fftblock; j++) {\n\t\tfor (i = 0; i < d[0]; i++) {\n\t\t xout[k][j+jj][i].real = y0[i][j].real;\n\t\t xout[k][j+jj][i].imag = y0[i][j].imag;\n\t\t}\n\t } #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,i ) ", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, ii;\n\n for (i = 0; i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for firstprivate(d ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ) ", "context_chars": 100, "text": " {\n\tlogd[i] = ilog2(d[i]);\n }\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (k = 0; k < d[2]; k++) {\n for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) \n\t for (j = 0; j < d[1]; j++) {\n\t\t#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t }\n/*\t if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n\t cfftz (is, logd[1], \n\t\t d[1], y0, y1);\n \n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) \n for (j = 0; j < d[1]; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[j][i].real;\n\t\t xout[k][j][i+ii].imag = y0[j][i].imag;\n\t }\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) ", "context_chars": 100, "text": "d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t for (j = 0; j < d[1]; j++) {\n\t\t#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t } #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) ", "context_chars": 100, "text": "ragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) \n\t for (j = 0; j < d[1]; j++) {\n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[j][i].real = x[k][j][i+ii].real;\n\t\t 
y0[j][i].imag = x[k][j][i+ii].imag;\n\t\t} #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) ", "context_chars": 100, "text": "er_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (j = 0; j < d[1]; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[j][i].real;\n\t\t xout[k][j][i+ii].imag = y0[j][i].imag;\n\t }\n\t } #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,i ) ", "context_chars": 100, "text": "-----------------------------------------------------*/\n\n int logd[3];\n int i, j, k, ii;\n\n for (i = 0;i < 3; i++) {\n\tlogd[i] = ilog2(d[i]);\n } #pragma omp parallel for firstprivate(d ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,ii ) ", "context_chars": 100, "text": " {\n\tlogd[i] = ilog2(d[i]);\n }\n{\ndcomplex y0[NX][FFTBLOCKPAD];\ndcomplex y1[NX][FFTBLOCKPAD];\n\n for (j = 0; j < d[1]; j++) {\n for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) \n\t for (k = 0; k < d[2]; k++) {\n\t\t#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t }\n\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */\n cfftz (is, logd[2],\n\t\t d[2], y0, y1);\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) \n for (k = 0; k < d[2]; k++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[k][i].real;\n\t\t xout[k][j][i+ii].imag = y0[k][i].imag;\n\t }\n\t }\n/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */\n\t}\n } #pragma omp parallel for private(i ,j ,k ,ii ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) ", "context_chars": 100, "text": "d[0] - fftblock; ii+=fftblock) {\n/*\t if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n\t for (k = 0; k < d[2]; k++) {\n\t\t#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) \n\t\tfor (i = 0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t}\n\t } #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) ", "context_chars": 100, "text": "ragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) \n\t for (k = 0; k < d[2]; k++) {\n\t\tfor (i = 
0; i < fftblock; i++) {\n\t\t y0[k][i].real = x[k][j][i+ii].real;\n\t\t y0[k][i].imag = x[k][j][i+ii].imag;\n\t\t} #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) ", "context_chars": 100, "text": "r_stop(T_FFTLOW); */\n/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */\n for (k = 0; k < d[2]; k++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\t xout[k][j][i+ii].real = y0[k][i].real;\n\t\t xout[k][j][i+ii].imag = y0[k][i].imag;\n\t }\n\t } #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(fftblock ,y ,x ,i ,j ) ", "context_chars": 100, "text": "-------------------------------------------*/\n if (m % 2 == 1) {\n\tfor (j = 0; j < n; j++) {\n\t for (i = 0; i < fftblock; i++) {\n\t\tx[j][i].real = y[j][i].real;\n\t\tx[j][i].imag = y[j][i].imag;\n\t } #pragma omp parallel for firstprivate(fftblock ,y ,x ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " int j, q,r,s, ierr;\n dcomplex chk,allchk;\n \n chk.real = 0.0;\n chk.imag = 0.0;\n\n\n for (j = 1; j <= 1024; j++) {\n\tq = j%NX+1;\n\tif (q >= xstart[0] && q <= xend[0]) {\n r = (3*j)%NY+1;\n if (r >= ystart[0] && r <= yend[0]) {\n\t\ts = (5*j)%NZ+1;\n\t\tif (s >= zstart[0] && s <= zend[0]) {\n\t\t cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);\n\t\t}\n\t }\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": " of update to the vector u\nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "-----------*/\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel 
for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "gma omp parallel for firstprivate(i ,j ,k ,m ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(rms ,m ) ", "context_chars": 100, "text": "-------------------------------*/\n\n int i, j, k, m, d;\n double xi, eta, zeta, u_exact[5], add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for firstprivate(rms ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(add ,rms ,m ,k ,j ,i ) ", "context_chars": 100, "text": " k <= grid_points[2]-1; k++) {\n\tzeta = (double)k * dnzm1;\n\texact_solution(xi, eta, zeta, u_exact);\n\tfor (m = 0; m < 5; m++) {\n\t add = u[m][i][j][k] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for firstprivate(add ,rms ,m ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,m ,rms ) ", "context_chars": 100, "text": "; m++) {\n\t add = u[m][i][j][k] - u_exact[m];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(m ,rms ) \n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for firstprivate(d ,m ,rms ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,rms ) ", "context_chars": 100, "text": " }\n }\n }\n\n #pragma omp parallel for firstprivate(d ,m ,rms ) \n for (m = 0; m < 5; m++) {\n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for firstprivate(m ,rms ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(rms ,m ) ", "context_chars": 100, "text": "----------------------------------------------------------*/\n\n int i, j, k, d, m;\n double add;\n\n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n } #pragma omp parallel for firstprivate(rms ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(rms ,m ) ", "context_chars": 100, 
"text": "pragma omp parallel for firstprivate(rms ,m ) \n for (m = 0; m < 5; m++) {\n rms[m] = 0.0;\n }\n\n for (i = 0; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(rms ,m ) \n for (j = 0; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(rms ,m ) \n for (k = 0; k <= grid_points[2]-2; k++) {\n\t#pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(rms ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(rms ,m ) ", "context_chars": 100, "text": "\n\n #pragma omp parallel for firstprivate(rms ,m ) \n for (i = 0; i <= grid_points[0]-2; i++) {\n for (j = 0; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(rms ,m ) \n for (k = 0; k <= grid_points[2]-2; k++) {\n\t#pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n } #pragma omp parallel for firstprivate(rms ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(rms ,m ) ", "context_chars": 100, "text": "#pragma omp parallel for firstprivate(rms ,m ) \n for (j = 0; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-2; k++) {\n\t#pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) \n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n } #pragma omp parallel for firstprivate(rms ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(rms ,m ) \n for (k = 0; k <= grid_points[2]-2; k++) {\n\tfor (m = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t} #pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(d ,m ,rms ) ", "context_chars": 100, "text": " = 0; m < 5; m++) {\n\t add = rhs[m][i][j][k];\n\t rms[m] = rms[m] + add*add;\n\t}\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(m ,rms ) \n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n }\n rms[m] = sqrt(rms[m]);\n } #pragma omp parallel for firstprivate(d ,m ,rms ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,rms ) ", "context_chars": 100, "text": " }\n }\n }\n\n #pragma omp parallel for firstprivate(d ,m ,rms ) \n for (m = 0; m < 5; m++) {\n for (d = 0; d < 3; d++) {\n rms[m] = rms[m] / (double)(grid_points[d]-2);\n } #pragma omp parallel for firstprivate(m ,rms ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j 
,i ,m ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,m ) \n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,m ) \n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,m ) \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(k ,j ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,m ) ", "context_chars": 100, "text": "-----------*/\n #pragma omp parallel for firstprivate(k ,j ,i ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,m ) \n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,m ) \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(k ,j ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,m ) ", "context_chars": 100, "text": "gma omp parallel for firstprivate(k ,j ,i ,m ) \n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,m ) \n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t}\n } #pragma omp parallel for firstprivate(k ,j ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,m ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(k ,j ,i ,m ) \n for (j = 0; j <= grid_points[1]-1; j++) {\n\tfor (k= 0; k <= grid_points[2]-1; k++) {\n\t forcing[m][i][j][k] = 0.0;\n\t} #pragma omp parallel for firstprivate(k ,j ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,i ,j ,k ) ", "context_chars": 100, "text": " 0; i <= grid_points[0]-1; i++) {\n\txi = (double)i * dnxm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][i] = dtemp[m];\n\t} #pragma omp parallel for firstprivate(m ,i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dtpp ,m ,i ,j ,k ) ", "context_chars": 100, "text": "vate(m ,i ,j ,k ) \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][i] = dtemp[m];\n\t}\n\n\tdtpp = 1.0 / dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][i] = dtpp * dtemp[m];\n\t} #pragma omp parallel for firstprivate(dtpp ,m ,i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,i ,j ,k ) ", "context_chars": 100, "text": "[i] = 0.5 * (buf[1][i]*ue[1][i] + 
buf[2][i]*ue[2][i]\n\t\t + buf[3][i]*ue[3][i]);\n }\n \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tim1 = i-1;\n\tip1 = i+1;\n\n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t tx2*( ue[1][ip1]-ue[1][im1] )+\n\t dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-\n (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+\n\t xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+\n\t dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+\n\t xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+\n\t dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);\n \n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+\n\t xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+\n\t dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-\n\t\t buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+\n\t 0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+\n\t\t buf[0][im1])+\n\t xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+\n\t xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+\n\t dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);\n } #pragma omp parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,i ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,j ,k ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\ti = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);\n\ti = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][i-1] + 6.0*ue[m][i] -\n \t 4.0*ue[m][i+1] + ue[m][i+2]);\n } #pragma omp parallel for firstprivate(dssp ,m ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][i-1] + 6.0*ue[m][i] -\n \t 4.0*ue[m][i+1] + ue[m][i+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) \n\tfor (i = 3; i <= grid_points[0]-4; i++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t}\n } #pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) ", "context_chars": 100, "text": "}\n\n #pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) \n for (m = 0; m < 5; m++) {\n\tfor (i = 3; i <= grid_points[0]-4; i++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t} #pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for 
firstprivate(dssp ,i ,m ,j ,k ) ", "context_chars": 100, "text": "(ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\ti = grid_points[0]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][i-2] - 4.0*ue[m][i-1] +\n\t 6.0*ue[m][i] - 4.0*ue[m][i+1]);\n\ti = grid_points[0]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);\n } #pragma omp parallel for firstprivate(dssp ,i ,m ,j ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,i ,k ) ", "context_chars": 100, "text": "0; j <= grid_points[1]-1; j++) {\n\teta = (double)j * dnym1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][j] = dtemp[m];\n\t} #pragma omp parallel for firstprivate(m ,j ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dtpp ,m ,j ,i ,k ) ", "context_chars": 100, "text": "private(m ,j ,i ,k ) \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][j] = dtemp[m];\n\t}\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][j] = dtpp * dtemp[m];\n\t} #pragma omp parallel for firstprivate(dtpp ,m ,j ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,j ,i ,k ) ", "context_chars": 100, "text": "];\n\tq[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +\n\t\t buf[3][j]*ue[3][j]);\n }\n\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tjm1 = j-1;\n\tjp1 = j+1;\n \n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t ty2*( ue[2][jp1]-ue[2][jm1] )+\n\t dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+\n\t yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+\n\t dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-\n\t\t (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+\n\t yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+\n\t dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);\n\n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+\n\t yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+\n\t dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-\n\t\t buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+\n\t 0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+\n\t\t buf[0][jm1])+\n\t yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+\n\t yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+\n\t dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);\n } #pragma omp parallel for firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,j ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,i ,k ) ", "context_chars": 100, "text": " 
\nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tj = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);\n\tj = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][j-1] + 6.0*ue[m][j] -\n\t 4.0*ue[m][j+1] + ue[m][j+2]);\n } #pragma omp parallel for firstprivate(dssp ,m ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][j-1] + 6.0*ue[m][j] -\n\t 4.0*ue[m][j+1] + ue[m][j+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) \n\tfor (j = 3; j <= grid_points[1]-4; j++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t}\n } #pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) ", "context_chars": 100, "text": "}\n\n #pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) \n for (m = 0; m < 5; m++) {\n\tfor (j = 3; j <= grid_points[1]-4; j++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t} #pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,j ,m ,i ,k ) ", "context_chars": 100, "text": "(ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tj = grid_points[1]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][j-2] - 4.0*ue[m][j-1] +\n\t 6.0*ue[m][j] - 4.0*ue[m][j+1]);\n\tj = grid_points[1]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);\n\n } #pragma omp parallel for firstprivate(dssp ,j ,m ,i ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,i ,j ) ", "context_chars": 100, "text": "; k <= grid_points[2]-1; k++) {\n\tzeta = (double)k * dnzm1;\n\n\texact_solution(xi, eta, zeta, dtemp);\n\tfor (m = 0; m < 5; m++) {\n\t ue[m][k] = dtemp[m];\n\t} #pragma omp parallel for firstprivate(m ,k ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dtpp ,m ,k ,i ,j ) ", "context_chars": 100, "text": "rivate(m ,k ,i ,j ) \n\tfor (m = 0; m < 5; m++) {\n\t ue[m][k] = dtemp[m];\n\t}\n\n\tdtpp = 1.0/dtemp[0];\n\n\tfor (m = 1; m < 5; m++) {\n\t buf[m][k] = dtpp * dtemp[m];\n\t} #pragma omp parallel for firstprivate(dtpp ,m ,k ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 
,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,i ,j ) ", "context_chars": 100, "text": "];\n\tq[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +\n\t\t buf[3][k]*ue[3][k]);\n }\n\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tkm1 = k-1;\n\tkp1 = k+1;\n \n\tforcing[0][i][j][k] = forcing[0][i][j][k] -\n\t tz2*( ue[3][kp1]-ue[3][km1] )+\n\t dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);\n\n\tforcing[1][i][j][k] = forcing[1][i][j][k]\n\t - tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+\n\t zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+\n\t dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);\n\n\tforcing[2][i][j][k] = forcing[2][i][j][k]\n\t - tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+\n\t zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+\n\t dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);\n\n\tforcing[3][i][j][k] = forcing[3][i][j][k]\n\t - tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-\n\t\t (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+\n\t zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+\n\t dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);\n\n\tforcing[4][i][j][k] = forcing[4][i][j][k]\n\t - tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-\n\t\t buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+\n\t 0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]\n\t\t +buf[0][km1])+\n\t zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+\n\t zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+\n\t dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);\n } #pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 ,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,m ,i ,j ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n\tk = 1;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);\n\tk = 2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (-4.0*ue[m][k-1] + 6.0*ue[m][k] -\n\t 4.0*ue[m][k+1] + ue[m][k+2]);\n } #pragma omp parallel for firstprivate(dssp ,m ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) ", "context_chars": 100, "text": " dssp *\n\t (-4.0*ue[m][k-1] + 6.0*ue[m][k] -\n\t 4.0*ue[m][k+1] + ue[m][k+2]);\n }\n\n for (m = 0; m < 5; m++) {\n\t#pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) \n\tfor (k = 3; k <= grid_points[2]-4; k++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t}\n } #pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) ", "context_chars": 100, "text": "}\n\n #pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) \n for (m = 0; m < 5; m++) {\n\tfor (k = 3; k <= grid_points[2]-4; k++) {\n\t forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t} #pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dssp ,k ,m ,i ,j ) ", "context_chars": 100, "text": "(ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);\n\t}\n }\n\n for (m = 0; m < 5; m++) {\n\tk = grid_points[2]-3;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][k-2] - 4.0*ue[m][k-1] +\n\t 6.0*ue[m][k] - 4.0*ue[m][k+1]);\n\tk = grid_points[2]-2;\n\tforcing[m][i][j][k] = forcing[m][i][j][k] - dssp *\n\t (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);\n } #pragma omp parallel for firstprivate(dssp ,k ,m ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": " of the forcing function, \nc-------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "-----------*/\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "gma omp parallel for firstprivate(i ,j ,k ,m ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(zeta ,eta ,xi ,dtemp ,m ) ", "context_chars": 100, "text": "xi, eta, zeta 
\nc-------------------------------------------------------------------*/\n\n int m;\n\n for (m = 0; m < 5; m++) {\n dtemp[m] = ce[0][m] +\n xi*(ce[1][m] + xi*(ce[4][m] + \n\t\t\t xi*(ce[7][m] + xi*ce[10][m]))) +\n eta*(ce[2][m] + eta*(ce[5][m] + \n\t\t\t eta*(ce[8][m] + eta*ce[11][m])))+\n zeta*(ce[3][m] + zeta*(ce[6][m] +\n\t\t\t\t zeta*(ce[9][m] + \n\t\t\t\t zeta*ce[12][m])));\n } #pragma omp parallel for firstprivate(zeta ,eta ,xi ,dtemp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ) ", "context_chars": 100, "text": "ng the whole thing here. \nc-------------------------------------------------------------------*/\n\n for (i = 0; i <= IMAX-1; i++) {\n #pragma omp parallel for firstprivate(j ,k ,i ) \n for (j = 0; j <= IMAX-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ) \n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ) ", "context_chars": 100, "text": "-------*/\n\n #pragma omp parallel for firstprivate(j ,k ,i ) \n for (i = 0; i <= IMAX-1; i++) {\n for (j = 0; j <= IMAX-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ) \n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n }\n } #pragma omp parallel for firstprivate(j ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ) ", "context_chars": 100, "text": "+) {\n #pragma omp parallel for firstprivate(j ,k ,i ) \n for (j = 0; j <= IMAX-1; j++) {\n for (k = 0; k <= IMAX-1; k++) {\n\tu[0][i][j][k] = 1.0;\n\tu[1][i][j][k] = 0.0;\n\tu[2][i][j][k] = 0.0;\n\tu[3][i][j][k] = 0.0;\n\tu[4][i][j][k] = 1.0;\n } #pragma omp parallel for firstprivate(j ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(Pxi ,Peta ,Pzeta ,xi ,eta ,zeta ,m ,k ,j ,i ) ", "context_chars": 100, "text": "\tfor (iz = 0; iz < 2; iz++) {\n\t exact_solution(xi, eta, (double)iz, \n\t\t\t &Pface[iz][2][0]);\n\t}\n\n\tfor (m = 0; m < 5; m++) {\n\t Pxi = xi * Pface[1][0][m] + \n\t (1.0-xi) * Pface[0][0][m];\n\t Peta = eta * Pface[1][1][m] + \n\t (1.0-eta) * Pface[0][1][m];\n\t Pzeta = zeta * Pface[1][2][m] + \n\t (1.0-zeta) * Pface[0][2][m];\n \n\t u[m][i][j][k] = Pxi + Peta + Pzeta - \n\t Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + \n\t Pxi*Peta*Pzeta;\n\n\t} #pragma omp parallel for firstprivate(Pxi ,Peta ,Pzeta ,xi ,eta ,zeta ,m ,k ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,j ) ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel 
for firstprivate(m ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,m ,k ,j ) ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for firstprivate(i ,m ,k ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,i ) ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for firstprivate(m ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,m ,k ,i ) ", "context_chars": 100, "text": "_points[2]; k++) {\n zeta = (double)k * dnzm1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for firstprivate(j ,m ,k ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,i ) ", "context_chars": 100, "text": "d_points[1]; j++) {\n eta = (double)j * dnym1;\n exact_solution(xi, eta, zeta, temp);\n for (m = 0; m < 5; m++) {\n\tu[m][i][j][k] = temp[m];\n } #pragma omp parallel for firstprivate(m ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,n ) ", "context_chars": 100, "text": "eft hand side for starters\nc-------------------------------------------------------------------*/\n for (n = 0; n < 15; n++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(k ,j ,i ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,n ) ", "context_chars": 100, "text": "----------*/\n #pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (n = 0; n < 15; n++) {\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(k ,j ,i ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,n ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; 
j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t}\n } #pragma omp parallel for firstprivate(k ,j ,i ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,n ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(k ,j ,i ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[n][i][j][k] = 0.0;\n\t} #pragma omp parallel for firstprivate(k ,j ,i ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,n ) ", "context_chars": 100, "text": "ll, but \nc convenient\nc-------------------------------------------------------------------*/\n for (n = 0; n < 3; n++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,n ) ", "context_chars": 100, "text": "-----------*/\n #pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (n = 0; n < 3; n++) {\n for (i = 0; i < grid_points[0]; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,n ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (i = 0; i < grid_points[0]; i++) {\n for (j = 0; j < grid_points[1]; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,n ) \n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,n ) ", "context_chars": 100, "text": " #pragma omp parallel for firstprivate(i ,j ,k ,n ) \n for (j = 0; j < grid_points[1]; j++) {\n\tfor (k = 0; k < grid_points[2]; k++) {\n\t lhs[5*n+2][i][j][k] = 1.0;\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,j ) ", "context_chars": 100, "text": "--------------------*/\n\n i = 1;\n #pragma omp for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n 
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;\n lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;\n lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;\n lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;\n } #pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "][i+1][j][k] + comz1;\n }\n }\n\n #pragma omp for \n for (i = 3; i <= grid_points[0]-4; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "or firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,comz1 ,i ,comz4 ,comz6 ,comz5 ,j ) ", "context_chars": 100, "text": " }\n }\n\n i = grid_points[0]-3;\n #pragma omp for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;\n lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;\n lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;\n } #pragma omp parallel for firstprivate(k ,comz1 ,i ,comz4 ,comz6 ,comz5 ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dttx2 ,i ) ", "context_chars": 100, "text": "------------------------------*/\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dttx2 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttx2 * 
speed[i-1][j][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for firstprivate(j ,k ,dttx2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dttx2 ,i ) ", "context_chars": 100, "text": "omp parallel for firstprivate(j ,k ,dttx2 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttx2 * speed[i-1][j][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttx2 * speed[i+1][j][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for firstprivate(j ,k ,dttx2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,i ) ", "context_chars": 100, "text": "--------------------*/\n\n j = 1;\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n \n lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;\n lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;\n lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;\n lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;\n } #pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "][i][j+1][k] + comz1;\n }\n }\n\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 3; j <= grid_points[1]-4; j++) {\n #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "or firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (j = 3; j <= grid_points[1]-4; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,comz1 ,j ,comz4 ,comz6 ,comz5 ,i ) ", "context_chars": 100, "text": " }\n }\n\n j = grid_points[1]-3;\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;\n lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;\n lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;\n } #pragma omp parallel for firstprivate(k ,comz1 ,j ,comz4 ,comz6 ,comz5 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dtty2 ,i ) ", "context_chars": 100, "text": "------------------------------*/\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dtty2 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for firstprivate(j ,k ,dtty2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dtty2 ,i ) ", "context_chars": 100, "text": "omp parallel for firstprivate(j ,k ,dtty2 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dtty2 * speed[i][j-1][k];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dtty2 * speed[i][j+1][k];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for firstprivate(j ,k ,dtty2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,comz5 ,comz4 ,comz1 ,comz6 ,i ) ", "context_chars": 100, "text": "--------------------*/\n\n k = 1;\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;\n lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - 
comz4;\n lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;\n } #pragma omp parallel for firstprivate(j ,comz5 ,comz4 ,comz1 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "][i][j][k+1] + comz1;\n }\n }\n\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (k = 3; k <= grid_points[2]-4; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n }\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) ", "context_chars": 100, "text": "or firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 3; k <= grid_points[2]-4; k++) {\n\tlhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n\tlhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n\tlhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n\tlhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\tlhs[4][i][j][k] = lhs[4][i][j][k] + comz1;\n } #pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,comz1 ,k ,comz4 ,comz6 ,comz5 ,i ) ", "context_chars": 100, "text": " }\n }\n\n k = grid_points[2]-3;\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;\n lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;\n lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;\n lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;\n\n lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;\n lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;\n lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;\n } #pragma omp parallel for firstprivate(j ,comz1 ,k ,comz4 ,comz6 ,comz5 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dttz2 ,i ) ", "context_chars": 100, "text": "------------------------------*/\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dttz2 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n }\n } #pragma omp parallel for firstprivate(j ,k ,dttz2 ,i ) 
"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dttz2 ,i ) ", "context_chars": 100, "text": "omp parallel for firstprivate(j ,k ,dttz2 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tlhs[0+5][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+5][i][j][k] = lhs[1][i][j][k] - \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+5][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+5][i][j][k] = lhs[3][i][j][k] + \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+5][i][j][k] = lhs[4][i][j][k];\n\tlhs[0+10][i][j][k] = lhs[0][i][j][k];\n\tlhs[1+10][i][j][k] = lhs[1][i][j][k] + \n\t dttz2 * speed[i][j][k-1];\n\tlhs[2+10][i][j][k] = lhs[2][i][j][k];\n\tlhs[3+10][i][j][k] = lhs[3][i][j][k] - \n\t dttz2 * speed[i][j][k+1];\n\tlhs[4+10][i][j][k] = lhs[4][i][j][k];\n } #pragma omp parallel for firstprivate(j ,k ,dttz2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) ", "context_chars": 100, "text": "------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) ", "context_chars": 100, "text": " for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) ", "context_chars": 100, "text": "vate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * 
r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) ", "context_chars": 100, "text": "-----------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) ", "context_chars": 100, "text": " for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n } #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) ", "context_chars": 100, "text": "vate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n } #pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " and the speed of sound. 
\nc-------------------------------------------------------------------*/\n\n for (i = 0; i <= grid_points[0]-1; i++) {\n #pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) \n for (j = 0; j <= grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) \n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) ", "context_chars": 100, "text": "--------------------*/\n\n #pragma omp parallel for \n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n #pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) \n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n }\n } #pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) ", "context_chars": 100, "text": "for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i ) \n for (j = 0; j <= grid_points[1]-1; j++) {\n for (k = 0; k <= grid_points[2]-1; k++) {\n\trho_inv = 1.0/u[0][i][j][k];\n\trho_i[i][j][k] = rho_inv;\n\tus[i][j][k] = u[1][i][j][k] * rho_inv;\n\tvs[i][j][k] = u[2][i][j][k] * rho_inv;\n\tws[i][j][k] = u[3][i][j][k] * rho_inv;\n\tsquare[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + \n\t\t\t\tu[2][i][j][k]*u[2][i][j][k] +\n\t\t\t\tu[3][i][j][k]*u[3][i][j][k] ) * rho_inv;\n\tqs[i][j][k] = square[i][j][k] * rho_inv;\n/*--------------------------------------------------------------------\nc (do not need speed and ainx until the lhs computation)\nc-------------------------------------------------------------------*/\n\taux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);\n\taux = sqrt(aux);\n\tspeed[i][j][k] = aux;\n\tainv[i][j][k] = 1.0/aux;\n } #pragma omp parallel for firstprivate(j ,k 
,rho_inv ,aux ,c1c2 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------------------------------------------*/\n\n for (m = 0; m < 5; m++) {\n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\t#pragma omp parallel for firstprivate(k ,j ,i ,m ) \n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(k ,j ,i ,m ) ", "context_chars": 100, "text": "for \n for (i = 0; i <= grid_points[0]-1; i++) {\n for (j = 0; j <= grid_points[1]-1; j++) {\n\tfor (k = 0; k <= grid_points[2]-1; k++) {\n\t rhs[m][i][j][k] = forcing[m][i][j][k];\n\t} #pragma omp parallel for firstprivate(k ,j ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "mpute xi-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n }\n } #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) ", "context_chars": 100, "text": "x4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tuijk = us[i][j][k];\n\tup1 = us[i+1][j][k];\n\tum1 = us[i-1][j][k];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * \n\t (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + \n\t u[0][i-1][j][k]) -\n\t tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * \n\t (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + \n\t u[1][i-1][j][k]) +\n\t xxcon2*con43 * (up1 - 2.0*uijk + um1) -\n\t tx2 * (u[1][i+1][j][k]*up1 - \n\t\t u[1][i-1][j][k]*um1 +\n\t\t (u[4][i+1][j][k]- square[i+1][j][k]-\n\t\t u[4][i-1][j][k]+ square[i-1][j][k])*\n\t\t c2);\n\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * \n\t (u[2][i+1][j][k] - 2.0*u[2][i][j][k] +\n\t 
u[2][i-1][j][k]) +\n\t xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +\n\t\t vs[i-1][j][k]) -\n\t tx2 * (u[2][i+1][j][k]*up1 - \n\t\t u[2][i-1][j][k]*um1);\n\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * \n\t (u[3][i+1][j][k] - 2.0*u[3][i][j][k] +\n\t u[3][i-1][j][k]) +\n\t xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +\n\t\t ws[i-1][j][k]) -\n\t tx2 * (u[3][i+1][j][k]*up1 - \n\t\t u[3][i-1][j][k]*um1);\n\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * \n\t (u[4][i+1][j][k] - 2.0*u[4][i][j][k] +\n\t u[4][i-1][j][k]) +\n\t xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +\n\t\t qs[i-1][j][k]) +\n\t xxcon4 * (up1*up1 - 2.0*uijk*uijk + \n\t\t um1*um1) +\n\t xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i-1][j][k]*rho_i[i-1][j][k]) -\n\t tx2 * ( (c1*u[4][i+1][j][k] - \n\t\t c2*square[i+1][j][k])*up1 -\n\t\t (c1*u[4][i-1][j][k] - \n\t\t c2*square[i-1][j][k])*um1 );\n } #pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dssp ,m ) ", "context_chars": 100, "text": "n \nc-------------------------------------------------------------------*/\n\n i = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\n i = 1;\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n } #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dssp ,m ) ", "context_chars": 100, "text": "\n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +\n\t u[m][i+2][j][k]);\n }\n }\n }\n i = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t 
(-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "}\n i = 2;\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n } #pragma omp parallel for firstprivate(j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "][i-1][j][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "< 5; m++) {\n #pragma omp parallel for \n for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = 
rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) ", "context_chars": 100, "text": "- 4.0*u[m][i+1][j][k] + \n\t u[m][i+2][j][k] );\n\t}\n }\n }\n }\n\n i = grid_points[0]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[0]-3;\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n } #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) ", "context_chars": 100, "text": "[k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );\n }\n }\n }\n\n i = grid_points[0]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[0]-2;\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "pute eta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * (u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n }\n } #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) ", "context_chars": 100, "text": "y4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tvijk = vs[i][j][k];\n\tvp1 = vs[i][j+1][k];\n\tvm1 = vs[i][j-1][k];\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * \n\t (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + \n\t u[0][i][j-1][k]) -\n\t ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * \n\t (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + \n\t u[1][i][j-1][k]) +\n\t yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + \n\t\t us[i][j-1][k]) -\n\t ty2 * (u[1][i][j+1][k]*vp1 - \n\t\t u[1][i][j-1][k]*vm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * \n\t (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + \n\t u[2][i][j-1][k]) +\n\t yycon2*con43 * (vp1 - 2.0*vijk + vm1) -\n\t ty2 * 
(u[2][i][j+1][k]*vp1 - \n\t\t u[2][i][j-1][k]*vm1 +\n\t\t (u[4][i][j+1][k] - square[i][j+1][k] - \n\t\t u[4][i][j-1][k] + square[i][j-1][k])\n\t\t *c2);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * \n\t (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + \n\t u[3][i][j-1][k]) +\n\t yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + \n\t\t ws[i][j-1][k]) -\n\t ty2 * (u[3][i][j+1][k]*vp1 - \n\t\t u[3][i][j-1][k]*vm1);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * \n\t (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + \n\t u[4][i][j-1][k]) +\n\t yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + \n\t\t qs[i][j-1][k]) +\n\t yycon4 * (vp1*vp1 - 2.0*vijk*vijk + \n\t\t vm1*vm1) +\n\t yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j-1][k]*rho_i[i][j-1][k]) -\n\t ty2 * ((c1*u[4][i][j+1][k] - \n\t\t c2*square[i][j+1][k]) * vp1 -\n\t\t (c1*u[4][i][j-1][k] - \n\t\t c2*square[i][j-1][k]) * vm1);\n } #pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,dssp ,m ) ", "context_chars": 100, "text": "ipation \nc-------------------------------------------------------------------*/\n\n j = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\n j = 1;\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n } #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,dssp ,m ) ", "context_chars": 100, "text": "\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +\n\t u[m][i][j+2][k]);\n }\n }\n }\n\n j = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t 
(-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\n j = 2;\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n } #pragma omp parallel for firstprivate(i ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "][i][j-1][k] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = 
rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "arallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) ", "context_chars": 100, "text": " 4.0*u[m][i][j+1][k] + \n\t u[m][i][j+2][k] );\n\t}\n }\n }\n }\n \n j = grid_points[1]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[1]-3;\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n } #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) ", "context_chars": 100, "text": "[k] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );\n }\n }\n }\n\n j = grid_points[1]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[1]-2;\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ute zeta-direction fluxes \nc-------------------------------------------------------------------*/\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) ", "context_chars": 100, "text": "---------------------*/\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n }\n } #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) ", "context_chars": 100, "text": "con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\twijk = ws[i][j][k];\n\twp1 = ws[i][j][k+1];\n\twm1 = ws[i][j][k-1];\n\n\trhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * \n\t (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + \n\t u[0][i][j][k-1]) -\n\t tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);\n\trhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * \n\t (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + \n\t u[1][i][j][k-1]) +\n\t zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + \n\t\t us[i][j][k-1]) -\n\t tz2 * (u[1][i][j][k+1]*wp1 - \n\t\t u[1][i][j][k-1]*wm1);\n\trhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * \n\t (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + \n\t u[2][i][j][k-1]) +\n\t zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + \n\t\t 
vs[i][j][k-1]) -\n\t tz2 * (u[2][i][j][k+1]*wp1 - \n\t\t u[2][i][j][k-1]*wm1);\n\trhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * \n\t (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + \n\t u[3][i][j][k-1]) +\n\t zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -\n\t tz2 * (u[3][i][j][k+1]*wp1 - \n\t\t u[3][i][j][k-1]*wm1 +\n\t\t (u[4][i][j][k+1] - square[i][j][k+1] - \n\t\t u[4][i][j][k-1] + square[i][j][k-1])\n\t\t *c2);\n\trhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * \n\t (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + \n\t u[4][i][j][k-1]) +\n\t zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + \n\t\t qs[i][j][k-1]) +\n\t zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + \n\t\t wm1*wm1) +\n\t zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - \n\t\t 2.0*u[4][i][j][k]*rho_i[i][j][k] +\n\t\t u[4][i][j][k-1]*rho_i[i][j][k-1]) -\n\t tz2 * ( (c1*u[4][i][j][k+1] - \n\t\t c2*square[i][j][k+1])*wp1 -\n\t\t (c1*u[4][i][j][k-1] - \n\t\t c2*square[i][j][k-1])*wm1);\n } #pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,dssp ,m ) ", "context_chars": 100, "text": " \nc-------------------------------------------------------------------*/\n\n k = 1;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\n k = 1;\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k]- dssp * \n\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n } #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,dssp ,m ) ", "context_chars": 100, "text": "\t ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +\n\t u[m][i][j][k+2]);\n }\n }\n }\n\n k = 2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t 
(-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "\n\n k = 2;\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n } #pragma omp parallel for firstprivate(i ,j ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "][i][j][k-1] + 6.0*u[m][i][j][k] -\n\t 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = 
rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 3*1; k <= grid_points[2]-3*1-1; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * \n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 4.0*u[m][i][j][k+1] + \n\t u[m][i][j][k+2] );\n\t}\n }\n }\n }\n \n k = grid_points[2]-3;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[2]-3;\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": "-1] + \n\t 6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );\n }\n }\n }\n\n k = grid_points[2]-2;\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "s[2]-2;\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] - dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n } #pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) ", "context_chars": 100, "text": " dssp *\n\t ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +\n\t 5.0*u[m][i][j][k] );\n }\n }\n }\n\n for (m = 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n for (m = 0; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) ", "context_chars": 100, "text": " 0; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) ", "context_chars": 100, "text": "ma omp parallel for firstprivate(i ,j ,k ,dt ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t 
rhs[m][i][j][k] = rhs[m][i][j][k] * dt;\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,dt ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) ", "context_chars": 100, "text": "1, r2, r3, \n r4, r5, ac2inv;\n\n #pragma omp for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tru1 = rho_i[i][j][k];\n\tuu = us[i][j][k];\n\tvv = vs[i][j][k];\n\tww = ws[i][j][k];\n\tac = speed[i][j][k];\n\tac2inv = ainv[i][j][k]*ainv[i][j][k];\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - \n\t\t\t vv*r3 - ww*r4 + r5 );\n\tt2 = bt * ru1 * ( uu * r1 - r2 );\n\tt3 = ( bt * ru1 * ac ) * t1;\n\n\trhs[0][i][j][k] = r1 - t1;\n\trhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );\n\trhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );\n\trhs[3][i][j][k] = - t2 + t3;\n\trhs[4][i][j][k] = t2 + t3;\n }\n } #pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) ", "context_chars": 100, "text": "c2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tru1 = rho_i[i][j][k];\n\tuu = us[i][j][k];\n\tvv = vs[i][j][k];\n\tww = ws[i][j][k];\n\tac = speed[i][j][k];\n\tac2inv = ainv[i][j][k]*ainv[i][j][k];\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - \n\t\t\t vv*r3 - ww*r4 + r5 );\n\tt2 = bt * ru1 * ( uu * r1 - r2 );\n\tt3 = ( bt * ru1 * ac ) * t1;\n\n\trhs[0][i][j][k] = r1 - t1;\n\trhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );\n\trhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );\n\trhs[3][i][j][k] = - t2 + t3;\n\trhs[4][i][j][k] = t2 + t3;\n } #pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) ", "context_chars": 100, "text": "vel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 ) \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\txvel = us[i][j][k];\n\tyvel = vs[i][j][k];\n\tzvel = ws[i][j][k];\n\tac = speed[i][j][k];\n\tacinv = ainv[i][j][k];\n\n\tac2u = ac*ac;\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tuzik1 = 
u[0][i][j][k];\n\tbtuz = bt * uzik1;\n\n\tt1 = btuz*acinv * (r4 + r5);\n\tt2 = r3 + t1;\n\tt3 = btuz * (r4 - r5);\n\n\trhs[0][i][j][k] = t2;\n\trhs[1][i][j][k] = -uzik1*r2 + xvel*t2;\n\trhs[2][i][j][k] = uzik1*r1 + yvel*t2;\n\trhs[3][i][j][k] = zvel*t2 + t3;\n\trhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) + \n\t qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;\n }\n } #pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) ", "context_chars": 100, "text": "r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\txvel = us[i][j][k];\n\tyvel = vs[i][j][k];\n\tzvel = ws[i][j][k];\n\tac = speed[i][j][k];\n\tacinv = ainv[i][j][k];\n\n\tac2u = ac*ac;\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tuzik1 = u[0][i][j][k];\n\tbtuz = bt * uzik1;\n\n\tt1 = btuz*acinv * (r4 + r5);\n\tt2 = r3 + t1;\n\tt3 = btuz * (r4 - r5);\n\n\trhs[0][i][j][k] = t2;\n\trhs[1][i][j][k] = -uzik1*r2 + xvel*t2;\n\trhs[2][i][j][k] = uzik1*r1 + yvel*t2;\n\trhs[3][i][j][k] = zvel*t2 + t3;\n\trhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) + \n\t qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;\n } #pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(dt ,m ) ", "context_chars": 100, "text": "----------------------------------------*/\n error_norm(xce);\n compute_rhs();\n\n rhs_norm(xcr);\n\n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n } #pragma omp parallel for firstprivate(dt ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": " \n for (m = 0; m < 5; m++) {\n xcr[m] = xcr[m] / dt;\n }\n\n *class = 'U';\n *verified = TRUE;\n\n for (m = 0; m < 5; m++) {\n xcrref[m] = 1.0;\n xceref[m] = 1.0;\n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ) ", "context_chars": 100, "text": "he known reference values.\n--------------------------------------------------------------------*/\n for (m = 0; m < 5; m++) {\n \n xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;\n xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);\n \n } #pragma omp parallel for firstprivate(m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------*/\n n = 0;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(m ,k ,fac1 ,j ,i ) \n for (k = 1; k <= 
grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,fac1 ,j ,i ) ", "context_chars": 100, "text": "\n i2 = i + 2;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for firstprivate(m ,k ,fac1 ,j ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------------------------*/\n\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,i ,i1 ,j ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1.0/lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i1][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 
,i ,i1 ,j ) ", "context_chars": 100, "text": " i1 = grid_points[0]-1;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1.0/lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\tlhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i1][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,i ,i1 ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i ,fac1 ,i1 ,m ,n ) ", "context_chars": 100, "text": " factors \n--------------------------------------------------------------------*/\n\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows separately\n--------------------------------------------------------------------*/\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n \n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i1][j][k];\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,i ,fac1 ,i1 ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", 
"omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m-3+1)*5;\n for (i = 0; i <= grid_points[0]-3; i++) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n\t lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -\n\t lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];\n\t rhs[m][i2][j][k] = rhs[m][i2][j][k] -\n\t lhs[n+0][i2][j][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "----------------------------------*/\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -\n\t lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];\n\trhs[m][i1][j][k] = rhs[m][i1][j][k] -\n\t lhs[n+1][i1][j][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately\n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i1][j][k];\n\trhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];\n\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n i = grid_points[0]-2;\n i1 = grid_points[0]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n } #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n #pragma omp 
parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n }\n } #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) \n for (m = 3; m < 5; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) ", "context_chars": 100, "text": " 3; m < 5; m++) {\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k];\n } #pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "------*/\n n = 0;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n for (m = 0; m < 3; m++) {\n #pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": " i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for \n for (m = 0; m < 3; m++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\t#pragma omp parallel for firstprivate(j ,k ,m ,i ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n } #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,k ,m ,i ) ", "context_chars": 100, "text": "pragma omp parallel for firstprivate(j ,k ,m ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t} #pragma omp parallel for firstprivate(j ,k ,m ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k 
,m ,n ) ", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n #pragma omp parallel for \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m-3+1)*5;\n for (i = grid_points[0]-3; i >= 0; i--) {\n i1 = i + 1;\n i2 = i + 2;\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i1][j][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i2][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "hsy();\n\n n = 0;\n\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(m ,k ,fac1 ,i ,j ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,fac1 ,i ,j ) ", "context_chars": 100, "text": "\n j2 = j + 2;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n } #pragma 
omp parallel for firstprivate(m ,k ,fac1 ,i ,j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "--------------------------------------------*/\n\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,j ,j1 ,i ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j1][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,j ,j1 ,i ) ", "context_chars": 100, "text": " j1 = grid_points[1]-1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\tlhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n }\n/*--------------------------------------------------------------------\nc scale the last row immediately \n--------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j1][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,j ,j1 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j ,fac1 ,j1 ,m ,n ) ", "context_chars": 100, "text": "c factors \n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t 
lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows separately\n--------------------------------------------------------------------*/\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j1][k];\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,j ,fac1 ,j1 ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m-3+1)*5;\n for (j = 0; j <= grid_points[1]-3; j++) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n\t lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -\n\t lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];\n\t rhs[m][i][j2][k] = rhs[m][i][j2][k] -\n\t lhs[n+0][i][j2][k]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------------------------*/\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -\n\t lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];\n\trhs[m][i][j1][k] = rhs[m][i][j1][k] -\n\t lhs[n+1][i][j1][k]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately \n--------------------------------------------------------------------*/\n\tfac2 = 
1./lhs[n+2][i][j1][k];\n\trhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n j = grid_points[1]-2;\n j1 = grid_points[1]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n } #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " }\n }\n\n #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) \n for (m = 3; m < 5; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) \n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) ", "context_chars": 100, "text": " 3; m < 5; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\tn = (m-3+1)*5;\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k];\n } #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) ", "context_chars": 100, "text": "rst three factors\n--------------------------------------------------------------------*/\n n = 0;\n for (m = 0; m < 3; m++) {\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j + 2;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n\t#pragma omp 
parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "3; m++) {\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j + 2;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\t#pragma omp parallel for firstprivate(i ,j ,k ,m ) \n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": " j2 = j + 2;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t} #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ,i ,j2 ,k ,m ,n ) ", "context_chars": 100, "text": " And the remaining two\n--------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j1 + 1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(j ,i ,j2 ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "m-3+1)*5;\n for (j = grid_points[1]-3; j >= 0; j--) {\n j1 = j + 1;\n j2 = j1 + 1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n\tfor (k = 1; k <= grid_points[2]-2; k++) {\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j1][k] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j2][k];\n\t}\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ON \nc-------------------------------------------------------------------*/\n\n lhsz();\n\n n = 0;\n\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(m ,j ,k ,fac1 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t 
lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,k ,fac1 ,i ) ", "context_chars": 100, "text": "/\n\n lhsz();\n\n n = 0;\n\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t}\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t}\n\tlhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\tlhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\tfor (m = 0; m < 3; m++) {\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for firstprivate(m ,j ,k ,fac1 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------------------------------*/\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(m ,j ,fac1 ,fac2 ,k ,k1 ,i ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j][k1];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m ,j ,fac1 ,fac2 ,k ,k1 ,i ) ", "context_chars": 100, "text": " k1 = grid_points[2]-1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) 
{\n fac1 = 1./lhs[n+2][i][j][k];\n lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n }\n lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\tlhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n }\n\n/*--------------------------------------------------------------------\nc scale the last row immediately\nc-------------------------------------------------------------------*/\n fac2 = 1./lhs[n+2][i][j][k1];\n for (m = 0; m < 3; m++) {\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for firstprivate(m ,j ,fac1 ,fac2 ,k ,k1 ,i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) ", "context_chars": 100, "text": "u-c factors \nc-------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n }\n\n/*--------------------------------------------------------------------\nc And again the last two rows separately\nc-------------------------------------------------------------------*/\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j][k1];\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", 
"context_chars": 100, "text": "llel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) ", "context_chars": 100, "text": " n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = 0; k <= grid_points[2]-3; k++) {\n\tk1 = k + 1;\n\tk2 = k + 2;\n\t fac1 = 1./lhs[n+2][i][j][k];\n\t lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\t lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\t lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\t lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n\t lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];\n\t lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -\n\t lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];\n\t rhs[m][i][j][k2] = rhs[m][i][j][k2] -\n\t lhs[n+0][i][j][k2]*rhs[m][i][j][k];\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "---------------------------------------*/\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfac1 = 1./lhs[n+2][i][j][k];\n\tlhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];\n\tlhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];\n\trhs[m][i][j][k] = fac1*rhs[m][i][j][k];\n\tlhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];\n\tlhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -\n\t lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];\n\trhs[m][i][j][k1] = rhs[m][i][j][k1] -\n\t lhs[n+1][i][j][k1]*rhs[m][i][j][k];\n/*--------------------------------------------------------------------\nc Scale the last row immediately (some of this is overkill\nc if this is the last cell)\nc-------------------------------------------------------------------*/\n\tfac2 = 1./lhs[n+2][i][j][k1];\n\trhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];\n\n }\n } #pragma omp parallel for "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "-----*/\n\n k = grid_points[2]-2;\n k1 = grid_points[2]-1;\n n = 0;\n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ) ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n } #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) ", "context_chars": 100, "text": " {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n }\n }\n\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "p parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) ", "context_chars": 100, "text": " n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\trhs[m][i][j][k] = rhs[m][i][j][k] -\n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1];\n } #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) ", "context_chars": 100, "text": "rst three factors\nc-------------------------------------------------------------------*/\n n = 0;\n for (m = 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t 
lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "n = 0;\n #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) \n for (m = 0; m < 3; m++) {\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ) ", "context_chars": 100, "text": " 0; m < 3; m++) {\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) ", "context_chars": 100, "text": " And the remaining two\nc-------------------------------------------------------------------*/\n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n }\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "a omp parallel for firstprivate(i ,j ,k ,m ,n ) \n for (m = 3; m < 5; m++) {\n n = (m-3+1)*5;\n for (i = 1; i <= grid_points[0]-2; i++) {\n #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) \n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) ", "context_chars": 100, "text": " n = (m-3+1)*5;\n #pragma omp parallel for \n for (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n\tfor (k = grid_points[2]-3; k >= 0; k--) {\n\t k1 = k + 1;\n\t k2 = k + 2;\n\t rhs[m][i][j][k] = rhs[m][i][j][k] - \n\t lhs[n+3][i][j][k]*rhs[m][i][j][k1] -\n\t 
lhs[n+4][i][j][k]*rhs[m][i][j][k2];\n\t}\n } #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(j ) ", "context_chars": 100, "text": "uctured OpenMP C version\"\n\t \" - EP Benchmark\\n\");\n sprintf(size, \"%12.0f\", pow(2.0, M+1));\n for (j = 13; j >= 1; j--) {\n\tif (size[j] == '.') size[j] = ' ';\n } #pragma omp parallel for firstprivate(j ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for private(i ) ", "context_chars": 100, "text": "e.\n*/\n vranlc(0, &(dum[0]), dum[1], &(dum[2]));\n dum[0] = randlc(&(dum[1]), dum[2]);\n \n for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;\n \n Mops = log(sqrt(fabs(max(1.0, 1.0))));\n\n timer_clear(1);\n timer_clear(2);\n timer_clear(3);\n timer_start(1);\n\n vranlc(0, &t1, A, x);\n\n/* Compute AN = A ^ (2 * NK) (mod 2^46). */\n\n t1 = A;\n\n for ( i = 1; i <= MK+1; i++) {\n\tt2 = randlc(&t1, t1);\n } #pragma omp parallel for private(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "2 = randlc(&t1, t1);\n }\n\n an = t1;\n tt = S;\n gc = 0.0;\n sx = 0.0;\n sy = 0.0;\n\n for ( i = 0; i <= NQ - 1; i++) {\n\tq[i] = 0.0;\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": " t2, t3, t4, x1, x2;\n int kk, i, ik, l;\n double qq[NQ];\t\t/* private copy of q[0:NQ-1] */\n\n for (i = 0; i < NQ; i++) qq[i] = 0.0;\n\n #pragma omp parallel for reduction(+:sx) reduction(+:sy) \n for (k = 1; k <= np; k++) {\n\tkk = k_offset + k;\n\tt1 = S;\n\tt2 = an;\n\n/* Find starting seed t1 for this kk. */\n\n\tfor (i = 1; i <= 100; i++) {\n ik = kk / 2;\n if (2 * ik != kk) t3 = randlc(&t1, t2);\n if (ik == 0) break;\n t3 = randlc(&t2, t2);\n kk = ik;\n\t}\n\n/* Compute uniform pseudorandom numbers. */\n\n\tif (TIMERS_ENABLED == TRUE) timer_start(3);\n\tvranlc(2*NK, &t1, A, x-1);\n\tif (TIMERS_ENABLED == TRUE) timer_stop(3);\n\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. 
This loop is not \nc vectorizable.\n*/\n\tif (TIMERS_ENABLED == TRUE) timer_start(2);\n\n\tfor ( i = 0; i < NK; i++) {\n x1 = 2.0 * x[2*i] - 1.0;\n x2 = 2.0 * x[2*i+1] - 1.0;\n t1 = pow2(x1) + pow2(x2);\n if (t1 <= 1.0) {\n\t\tt2 = sqrt(-2.0 * log(t1) / t1);\n\t\tt3 = (x1 * t2);\t\t\t\t/* Xi */\n\t\tt4 = (x2 * t2);\t\t\t\t/* Yi */\n\t\tl = max(fabs(t3), fabs(t4));\n\t\tqq[l] += 1.0;\t\t\t\t/* counts */\n\t\tsx = sx + t3;\t\t\t\t/* sum of Xi */\n\t\tsy = sy + t4;\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:sx) reduction(+:sy) ", "context_chars": 100, "text": "] */\n\n #pragma omp parallel for firstprivate(i ) \n for (i = 0; i < NQ; i++) qq[i] = 0.0;\n\n for (k = 1; k <= np; k++) {\n\tkk = k_offset + k;\n\tt1 = S;\n\tt2 = an;\n\n/* Find starting seed t1 for this kk. */\n\n\tfor (i = 1; i <= 100; i++) {\n ik = kk / 2;\n if (2 * ik != kk) t3 = randlc(&t1, t2);\n if (ik == 0) break;\n t3 = randlc(&t2, t2);\n kk = ik;\n\t}\n\n/* Compute uniform pseudorandom numbers. */\n\n\tif (TIMERS_ENABLED == TRUE) timer_start(3);\n\tvranlc(2*NK, &t1, A, x-1);\n\tif (TIMERS_ENABLED == TRUE) timer_stop(3);\n\n/*\nc Compute Gaussian deviates by acceptance-rejection method and \nc tally counts in concentric square annuli. This loop is not \nc vectorizable.\n*/\n\tif (TIMERS_ENABLED == TRUE) timer_start(2);\n\n\tfor ( i = 0; i < NK; i++) {\n x1 = 2.0 * x[2*i] - 1.0;\n x2 = 2.0 * x[2*i+1] - 1.0;\n t1 = pow2(x1) + pow2(x2);\n if (t1 <= 1.0) {\n\t\tt2 = sqrt(-2.0 * log(t1) / t1);\n\t\tt3 = (x1 * t2);\t\t\t\t/* Xi */\n\t\tt4 = (x2 * t2);\t\t\t\t/* Yi */\n\t\tl = max(fabs(t3), fabs(t4));\n\t\tqq[l] += 1.0;\t\t\t\t/* counts */\n\t\tsx = sx + t3;\t\t\t\t/* sum of Xi */\n\t\tsy = sy + t4;\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n } #pragma omp parallel for reduction(+:sx) reduction(+:sy) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": ";\t\t\t\t/* sum of Yi */\n }\n\t}\n\tif (TIMERS_ENABLED == TRUE) timer_stop(2);\n }\n {\n for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];\n }\n#if defined(_OPENMP)\n nthreads = omp_get_num_threads();\n /* _OPENMP */ \n} /* end of parallel region */ \n\n #pragma omp parallel for firstprivate(i ) reduction(+:gc) \n for (i = 0; i <= NQ-1; i++) {\n gc = gc + q[i];\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) reduction(+:gc) ", "context_chars": 100, "text": " nthreads = omp_get_num_threads();\n#endif /* _OPENMP */ \n} /* end of parallel region */ \n\n for (i = 0; i <= NQ-1; i++) {\n gc = gc + q[i];\n } #pragma omp parallel for firstprivate(i ) reduction(+:gc) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "LT_DEFAULT;\n\tnit = NIT_DEFAULT;\n\tnx[lt] = NX_DEFAULT;\n\tny[lt] = NY_DEFAULT;\n\tnz[lt] = NZ_DEFAULT;\n\n\tfor (i = 0; i <= 
7; i++) {\n\t debug_vec[i] = DEBUG_DEFAULT;\n\t} #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(lt ,k ) ", "context_chars": 100, "text": "r ( k = lt-1; k >= 1; k--) {\n\tnx[k] = nx[k+1]/2;\n\tny[k] = ny[k+1]/2;\n\tnz[k] = nz[k+1]/2;\n }\n\n for (k = 1; k <= lt; k++) {\n\tm1[k] = nx[k]+2;\n\tm2[k] = nz[k]+2;\n\tm3[k] = ny[k]+2;\n } #pragma omp parallel for firstprivate(lt ,k ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,r1 ,r2 ) ", "context_chars": 100, "text": "-----------------------------------------------*/\n\n int i3, i2, i1;\n double r1[M], r2[M];\n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n1 ,r ,i1 ,i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n #pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i1 ,i2 ,i3 ) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ,r1 ,r2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n1 ,r ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": ",i2 ,i3 ,r1 ,r2 ) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t } #pragma omp parallel for firstprivate(n1 ,r ,i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t } #pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i1 ,i2 ,i3 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,u1 ,u2 ) ", "context_chars": 100, "text": "-----------------------------------------------*/\n\n int i3, i2, i1;\n double u1[M], u2[M];\n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n1 ,u ,i1 ,i2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t }\n\t for (i1 = 1; i1 < n1-1; i1++) {\n\t\tr[i3][i2][i1] = v[i3][i2][i1]\n\t\t - a[0] * u[i3][i2][i1]\n/*--------------------------------------------------------------------\nc Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)\nc---------------------------------------------------------------------\nc > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)\nc > + u1(i1) )\nc-------------------------------------------------------------------*/\n\t\t- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )\n\t\t - a[3] * ( u2[i1-1] + u2[i1+1] );\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ,u1 ,u2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n1 ,u ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": ",i2 ,i3 ,u1 ,u2 ) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t } #pragma omp parallel for firstprivate(n1 ,u ,i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(j1 ,j2 ,j3 ,i1 ,i2 ,i3 ,x1 ,y1 ,x2 ,y2 ) ", "context_chars": 100, "text": "{\n d2 = 1;\n }\n\n if (m3k == 3) {\n d3 = 2;\n } else {\n d3 = 1;\n }\n for (j3 = 1; j3 < m3j-1; j3++) {\n\ti3 = 2*j3-d3;\n/*C i3 = 2*j3-1*/\n\tfor (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n #pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j1 ,j2 ,j3 ) \n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t }\n\n for (j1 = 1; j1 < m1j-1; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\ty2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]\n\t\t + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];\n\t\tx2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]\n\t\t + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];\n\t\ts[j3][j2][j1] =\n\t\t 0.5 * r[i3+1][i2+1][i1+1]\n\t\t + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)\n\t\t + 0.125 * ( x1[i1] + x1[i1+2] + y2)\n\t\t + 0.0625 * ( y1[i1] + y1[i1+2] );\n\t }\n\t}\n } #pragma omp parallel for private(j1 ,j2 ,j3 ,i1 ,i2 ,i3 ,x1 ,y1 ,x2 ,y2 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j1 ,j2 ,j3 ) ", "context_chars": 100, "text": 
"for (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t } #pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j1 ,j2 ,j3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,z1 ,z2 ,z3 ) ", "context_chars": 100, "text": " parameter( m=535 )\n*/\n double z1[M], z2[M], z3[M];\n\n if ( n1 != 3 && n2 != 3 && n3 != 3 ) {\n\tfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\t#pragma omp parallel for firstprivate(mm1 ,z ,i1 ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]\n\t\t\t+z[i3][i2][i1];\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\t#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) \n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2 ,i3 ,z1 ,z2 ,z3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,z ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": ",i3 ,z1 ,z2 ,z3 ) \n\tfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t} #pragma omp parallel for firstprivate(mm1 ,z ,i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t} #pragma omp parallel for firstprivate(mm1 ,u ,i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": " d3 = 2;\n t3 = 1;\n\t} else {\n d3 = 1;\n t3 = 0;\n\t}\n \n {\n\tfor ( i3 = d3; i3 <= mm3-1; i3++) {\n for ( i2 = d2; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]\n\t\t\t+z[i3-1][i2-1][i1-1];\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]\n\t\t\t+0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n for ( i2 = 1; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]\n\t\t\t+0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]\n\t\t\t+0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t}\n\tfor ( i3 = 1; i3 <= mm3-1; i3++) {\n for ( i2 = d2; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]\n\t\t\t+0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]\n\t\t\t+0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1]\n\t\t\t +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t for ( i2 = 1; i2 <= mm2-1; i2++) {\n\t\tfor ( i1 = d1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]\n\t\t\t+0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]\n\t\t\t 
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t\tfor ( i1 = 1; i1 <= mm1-1; i1++) {\n\t\t u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =\n\t\t\tu[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]\n\t\t\t+0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]\n\t\t\t\t+z[i3][i2][i1-1]+z[i3][i2-1][i1-1]\n\t\t\t\t+z[i3-1][i2][i1]+z[i3-1][i2-1][i1]\n\t\t\t\t+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);\n\t\t}\n\t }\n\t} #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ,a ) reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": "\n\n double s = 0.0;\n int i3, i2, i1, n;\n double a = 0.0, tmp = 0.0;\n\n n = nx*ny*nz;\n\n for (i3 = 1; i3 < n3-1; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ,a ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": "l for private(i1 ,i2 ,i3 ,a ) reduction(+:tmp) reduction(+:s) \n for (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t} #pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) ", "context_chars": 100, "text": " ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) \n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t } #pragma omp parallel for firstprivate(n3 ,i2 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "--------------------------------------------------*/\n\n int i1, i2, i3;\n\n /* axis = 1 */\n{\n for ( i3 = 1; i3 < n3-1; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,u ,n1 ,n2 ,i3 ) \n\tfor ( i2 = 1; i2 < n2-1; i2++) {\n\t u[i3][i2][n1-1] = u[i3][i2][1];\n\t u[i3][i2][0] = u[i3][i2][n1-2];\n\t}\n// } #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,u ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "s = 1 */\n{\n #pragma omp 
parallel for private(i1 ,i2 ,i3 ) \n for ( i3 = 1; i3 < n3-1; i3++) {\n\tfor ( i2 = 1; i2 < n2-1; i2++) {\n\t u[i3][i2][n1-1] = u[i3][i2][1];\n\t u[i3][i2][0] = u[i3][i2][n1-2];\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,u ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,u ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "i2][n1-2];\n\t}\n// }\n\n /* axis = 2 */\n//#pragma omp for\n// for ( i3 = 1; i3 < n3-1; i3++) {\n\tfor ( i1 = 0; i1 < n1; i1++) {\n\t u[i3][n2-1][i1] = u[i3][1][i1];\n\t u[i3][0][i1] = u[i3][n2-2][i1];\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,u ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "i3][n2-1][i1] = u[i3][1][i1];\n\t u[i3][0][i1] = u[i3][n2-2][i1];\n\t}\n }\n\n /* axis = 3 */\n for ( i2 = 0; i2 < n2; i2++) {\n\tfor ( i1 = 0; i1 < n1; i1++) {\n\t u[n3-1][i2][i1] = u[1][i2][i1];\n\t u[0][i2][i1] = u[n3-2][i2][i1];\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "ks for twenty candidates\nc-------------------------------------------------------------------*/\n for (i = 0; i < MM; i++) {\n\tten[i][1] = 0.0;\n\tj1[i][1] = 0;\n\tj2[i][1] = 0;\n\tj3[i][1] = 0;\n\tten[i][0] = 1.0;\n\tj1[i][0] = 0;\n\tj2[i][0] = 0;\n\tj3[i][0] = 0;\n } #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i2 ,i1 ) ", "context_chars": 100, "text": "n\");\n for (i = MM-1; i >= 0; i--) {\n\tprintf(\" %4d\", jg[0][i][1]);\n }\n printf(\"\\n\");*/\n\n for (i3 = 0; i3 < n3; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i2 ,i1 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": " printf(\"\\n\");*/\n\n #pragma omp parallel for private(i2 ,i1 ) \n for (i3 = 0; i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "p parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t } #pragma omp parallel for firstprivate(n3 ,i1 
,i2 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1 ,i2 ,i3 ) ", "context_chars": 100, "text": "---\nc-------------------------------------------------------------------*/\n\n int i1, i2, i3;\n for (i3 = 0;i3 < n3; i3++) {\n\t#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i1 ,i2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "nt i1, i2, i3;\n #pragma omp parallel for private(i1 ,i2 ,i3 ) \n for (i3 = 0;i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t} #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) ", "context_chars": 100, "text": "p parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) \n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t } #pragma omp parallel for firstprivate(n3 ,i1 ,i2 ,z ,n1 ,n2 ,i3 ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) reduction(+:j) ", "context_chars": 100, "text": "2[i];\n\n\n/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */\n\n j = 0;\n for( i=1; i key_array[i] )\n j++;\n\n\n if( j != 0 )\n {\n printf( \"Full_verify: number of keys out of sort: %d\\n\",\n j );\n } #pragma omp parallel for firstprivate(i ) reduction(+:j) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "est keys are, load into */\n/* top of array bucket_size */\n for( i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": " */\n for( i=0; ifor( i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/baseline/NPB3.0-omp-c/IS/is.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(i ) ", "context_chars": 100, "text": "le timecounter, maxtime;\n\n\n\n/* Initialize the verification arrays if a valid class */\n for( i=0; i #pragma omp parallel for firstprivate(i ) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB021-reductionmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j)", "context_chars": 
100, "text": "oat u[100][100];\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5; \n\nfor (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB019-plusplus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i(argv[1]);\n\n int input[inLen]; \n int output[inLen];\n for (i=0; ifor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB012-minusminus-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " x[len]; \n\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\nfor (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB022-reductionmissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private (temp,i,j)", "context_chars": 100, "text": "loat u[len][len];\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n u[i][j] = 0.5;\n\nfor (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n {\n temp = u[i][j];\n sum = sum + temp * temp;\n } #pragma omp parallel for private (temp,i,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB016-outputdep-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": ". x@74:5 vs. x@74:5\n*/\n\n#include \nint a[100];\n\nint main()\n{\n int len=100; \n int i,x=10;\n\nfor (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB109-orderedmissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ordered", "context_chars": 100, "text": " * Missing the ordered clause\n * Data race pair: x@56:5 vs. 
x@56:5\n * */\nint main()\n{\n int x =0;\n for (int i = 0; i < 100; ++i) {\n x++;\n } #pragma omp parallel for ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB008-indirectaccess4-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB017-outputdep-var-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "ar* argv[])\n{\n int len=100; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len];\n int i,x=10;\n\nfor (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB018-plusplus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")\n{\n int i ;\n int inLen=1000 ; \n int outLen = 0;\n\n for (i=0; ifor (i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB035-truedepscalar-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "int main(int argc, char* argv[])\n{ \n int i; \n int tmp;\n tmp = 10;\n int len=100;\n\n int a[100];\n\nfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB036-truedepscalar-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt i; \n int tmp;\n tmp = 10;\n int len=100;\n\n if (argc>1)\n len = atoi(argv[1]);\n\n int a[len];\nfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB065-pireduction-orig-no.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:pi) private(x)", "context_chars": 100, "text": "uble pi = 0.0;\n long int i;\n double x, interval_width;\n interval_width = 1.0/(double)num_steps;\n\nfor (i = 0; i < num_steps; i++) {\n x = (i+ 0.5) * interval_width;\n pi += 1.0 / (x*x + 1.0);\n } #pragma omp parallel for reduction(+:pi) private(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "nt len=100;\n if (argc>1)\n len = atoi(argv[1]);\n int a[len];\n for (i=0;ifor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB006-indirectaccess2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB111-linearmissing-orig-yes.c", "omp_pragma_line": 
"#pragma omp parallel for ", "context_chars": 100, "text": "for (i=0;i #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "char* argv[])\n{\n int i;\n int tmp;\n int len=100;\n int a[100];\n for (i=0;ifor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "0; \n\n double a[20][20];\n\n for (i=0; i< len; i++)\n for (j=0; jfor (i = 0; i < len - 1; i += 1) {\n for (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,1)", "context_chars": 100, "text": "[i]=0.5*i;\n }\n// default static even scheduling may not trigger data race, using static,1 instead.\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0 + i;\n xa2[idx]+= 3.0 + i;\n } #pragma omp parallel for schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB007-indirectaccess3-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/ initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n }\n\nfor (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0;\n xa2[idx]+= 3.0;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB011-minusminus-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ialize x[]\n for (i=0; i< len; i++)\n {\n if (i%2==0)\n x[i]=5;\n else\n x[i]= -5;\n }\n\nfor (i=numNodes-1 ; i>-1 ; --i) {\n if (x[i]<=0) {\n numNodes2-- ;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": ";\n\n double a[len][len];\n\n for (i=0; i< len; i++)\n for (j=0; jfor (i = 0; i < len - 1; i += 1) {\n for (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n }\n } #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/b+tree/kernel/kernel_cpu_2.c", "omp_pragma_line": "#pragma omp parallel for private (i, thid)", "context_chars": 100, "text": "==================150\n\n\t// private thread IDs\n\tint thid;\n\tint bid;\n\n\t// process number of querries\n\tfor(bid = 0; bid < count; bid++){\n\n\t\t// process levels of the tree\n\t\tfor(i = 0; i < maxheight; i++){\n\n\t\t\t// process all leaves at each level\n\t\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t\tif((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// 
\"offset[bid]\" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[currKnode[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset[bid] = knodes[currKnode[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// \"offset_2[bid]\" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[lastKnode[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset_2[bid] = knodes[lastKnode[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// set for next tree level\n\t\t\tcurrKnode[bid] = offset[bid];\n\t\t\tlastKnode[bid] = offset_2[bid];\n\n\t\t}\n\n\t\t// process leaves\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t// Find the index of the starting record\n\t\t\tif(knodes[currKnode[bid]].keys[thid] == start[bid]){\n\t\t\t\trecstart[bid] = knodes[currKnode[bid]].indices[thid];\n\t\t\t}\n\n\t\t}\n\n\t\t// process leaves\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t// Find the index of the ending record\n\t\t\tif(knodes[lastKnode[bid]].keys[thid] == end[bid]){\n\t\t\t\treclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1;\n\t\t\t}\n\n\t\t}\n\n\t} #pragma omp parallel for private (i, thid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/b+tree/kernel/kernel_cpu.c", "omp_pragma_line": "#pragma omp parallel for private (i, thid)", "context_chars": 100, "text": "==========150\n\n\t// private thread IDs\n\tint thid;\n\tint bid;\n\tint i;\n\n\t// process number of querries\n\tfor(bid = 0; bid < count; bid++){\n\n\t\t// process levels of the tree\n\t\tfor(i = 0; i < maxheight; i++){\n\n\t\t\t// process all leaves at each level\n\t\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\t\t// if value is between the two keys\n\t\t\t\tif((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid+1] > keys[bid])){\n\t\t\t\t\t// this conditional statement is inserted to avoid crush due to but in original code\n\t\t\t\t\t// \"offset[bid]\" calculated below that addresses knodes[] in the next iteration goes outside of its bounds cause segmentation fault\n\t\t\t\t\t// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address\n\t\t\t\t\tif(knodes[offset[bid]].indices[thid] < knodes_elem){\n\t\t\t\t\t\toffset[bid] = knodes[offset[bid]].indices[thid];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// set for next tree level\n\t\t\tcurrKnode[bid] = offset[bid];\n\n\t\t}\n\n\t\t//At this point, we have a candidate leaf node which may contain\n\t\t//the target record. 
Check each key to hopefully find the record\n\t\t// process all leaves at each level\n\t\tfor(thid = 0; thid < threadsPerBlock; thid++){\n\n\t\t\tif(knodes[currKnode[bid]].keys[thid] == keys[bid]){\n\t\t\t\tans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value;\n\t\t\t}\n\n\t\t}\n\n\t} #pragma omp parallel for private (i, thid)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/bfs/bfs.cpp", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "/omp_set_num_threads(num_omp_threads);\n #ifdef OMP_OFFLOAD\n #pragma omp target\n #endif\n for(int tid = 0; tid < no_of_nodes; tid++ )\n {\n if (h_graph_mask[tid] == true){ \n h_graph_mask[tid]=false;\n for(int i=h_graph_nodes[tid].starting; i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++)\n {\n int id = h_graph_edges[i];\n if(!h_graph_visited[id])\n {\n h_cost[id]=h_cost[tid]+1;\n h_updating_graph_mask[id]=true;\n }\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/bfs/bfs.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "}\n }\n\n#ifdef OPEN\n #ifdef OMP_OFFLOAD\n #pragma omp target map(stop)\n #endif\n for(int tid=0; tid< no_of_nodes ; tid++ )\n {\n if (h_updating_graph_mask[tid] == true){\n h_graph_mask[tid]=true;\n h_graph_visited[tid]=true;\n stop=true;\n h_updating_graph_mask[tid]=false;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "OFFLOAD\n#pragma omp declare target\n#endif\ntemplate \nvoid copy(T* dst, T* src, int N)\n{\n\tfor(int i = 0; i < N; i++)\n\t{\n\t\tdst[i] = src[i];\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "] << std::endl;\n\t}\n\n}\n\nvoid initialize_variables(int nelr, float* variables, float* ff_variable)\n{\n\tfor(int i = 0; i < nelr; i++)\n\t{\n\t\tfor(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j];\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(auto)", "context_chars": 100, "text": "step_factor(int nelr, float* __restrict variables, float* areas, float* __restrict step_factors)\n{\n\tfor(int blk = 0; blk < nelr/block_length; ++blk)\n {\n int b_start = blk*block_length;\n int b_end = (blk+1)*block_length > nelr ? 
nelr : (blk+1)*block_length;\n#pragma omp simd\n\tfor(int i = b_start; i < b_end; i++)\n\t{\n\t\tfloat density = variables[i + VAR_DENSITY*nelr];\n\n\t\tfloat3 momentum;\n\t\tmomentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];\n\t\tmomentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];\n\t\tmomentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];\n\n\t\tfloat density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];\n\t\tfloat3 velocity;\t compute_velocity(density, momentum, velocity);\n\t\tfloat speed_sqd = compute_speed_sqd(velocity);\n\t\tfloat pressure = compute_pressure(density, density_energy, speed_sqd);\n\t\tfloat speed_of_sound = compute_speed_of_sound(density, pressure);\n\n\t\t// dt = float(0.5f) * std::sqrt(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once\n\t\tstep_factors[i] = float(0.5f) / (std::sqrt(areas[i]) * (std::sqrt(speed_sqd) + speed_of_sound));\n\t}\n } #pragma omp parallel for default(shared) schedule(auto)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(auto)", "context_chars": 100, "text": ", float3 ff_flux_contribution_density_energy)\n{\n\tconst float smoothing_coefficient = float(0.2f);\n\n\tfor(int blk = 0; blk < nelr/block_length; ++blk)\n {\n int b_start = blk*block_length;\n int b_end = (blk+1)*block_length > nelr ? nelr : (blk+1)*block_length;\n#pragma omp simd\n\tfor(int i = b_start; i < b_end; ++i)\n\t{\n float density_i = variables[i + VAR_DENSITY*nelr];\n\t\tfloat3 momentum_i;\n\t\tmomentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];\n\t\tmomentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];\n\t\tmomentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];\n\n\t\tfloat density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];\n\n\t\tfloat3 velocity_i; \t\t\t\t compute_velocity(density_i, momentum_i, velocity_i);\n\t\tfloat speed_sqd_i = compute_speed_sqd(velocity_i);\n\t\tfloat speed_i = std::sqrt(speed_sqd_i);\n\t\tfloat pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);\n\t\tfloat speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);\n\t\tfloat3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;\n\t\tfloat3 flux_contribution_i_density_energy;\n\t\tcompute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);\n\n\t\tfloat flux_i_density = float(0.0f);\n\t\tfloat3 flux_i_momentum;\n\t\tflux_i_momentum.x = float(0.0f);\n\t\tflux_i_momentum.y = float(0.0f);\n\t\tflux_i_momentum.z = float(0.0f);\n\t\tfloat flux_i_density_energy = float(0.0f);\n\n\t\tfloat3 velocity_nb;\n\t\tfloat density_nb, density_energy_nb;\n\t\tfloat3 momentum_nb;\n\t\tfloat3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;\n\t\tfloat3 flux_contribution_nb_density_energy;\n\t\tfloat speed_sqd_nb, speed_of_sound_nb, pressure_nb;\n#pragma unroll\n\t\tfor(int j = 0; j < NNB; j++)\n\t\t{\n float3 normal; float normal_len;\n\t\t float factor;\n\n\t\t\tint nb = elements_surrounding_elements[i + j*nelr];\n\t\t\tnormal.x = normals[i + (j + 0*NNB)*nelr];\n\t\t\tnormal.y = normals[i + (j + 1*NNB)*nelr];\n\t\t\tnormal.z = normals[i + (j + 2*NNB)*nelr];\n\t\t\tnormal_len = 
std::sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);\n\n\t\t\tif(nb >= 0) \t// a legitimate neighbor\n\t\t\t{\n\t\t\t\tdensity_nb = variables[nb + VAR_DENSITY*nelr];\n\t\t\t\tmomentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];\n\t\t\t\tmomentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];\n\t\t\t\tmomentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];\n\t\t\t\tdensity_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];\n\t\t\t\t\t\t\t\t\t\t\t\t\tcompute_velocity(density_nb, momentum_nb, velocity_nb);\n\t\t\t\tspeed_sqd_nb = compute_speed_sqd(velocity_nb);\n\t\t\t\tpressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);\n\t\t\t\tspeed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);\n\t\t\t\t\t\t\t\t\t\t\t\t\tcompute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);\n\n\t\t\t\t// artificial viscosity\n\t\t\t\tfactor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + std::sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);\n\t\t\t\tflux_i_density += factor*(density_i-density_nb);\n\t\t\t\tflux_i_density_energy += factor*(density_energy_i-density_energy_nb);\n\t\t\t\tflux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);\n\t\t\t\tflux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);\n\t\t\t\tflux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);\n\n\t\t\t\t// accumulate cell-centered fluxes\n\t\t\t\tfactor = float(0.5f)*normal.x;\n\t\t\t\tflux_i_density += factor*(momentum_nb.x+momentum_i.x);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);\n\n\t\t\t\tfactor = float(0.5f)*normal.y;\n\t\t\t\tflux_i_density += factor*(momentum_nb.y+momentum_i.y);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);\n\n\t\t\t\tfactor = float(0.5f)*normal.z;\n\t\t\t\tflux_i_density += factor*(momentum_nb.z+momentum_i.z);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);\n\t\t\t}\n\t\t\telse if(nb == -1)\t// a wing boundary\n\t\t\t{\n\t\t\t\tflux_i_momentum.x += normal.x*pressure_i;\n\t\t\t\tflux_i_momentum.y += normal.y*pressure_i;\n\t\t\t\tflux_i_momentum.z += normal.z*pressure_i;\n\t\t\t}\n\t\t\telse if(nb == -2) // a far field boundary\n\t\t\t{\n\t\t\t\tfactor = float(0.5f)*normal.x;\n\t\t\t\tflux_i_density += 
factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.x+flux_contribution_i_density_energy.x);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.x + flux_contribution_i_momentum_x.x);\n\t\t\t\tflux_i_momentum.y += factor*(ff_flux_contribution_momentum_y.x + flux_contribution_i_momentum_y.x);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.x + flux_contribution_i_momentum_z.x);\n\n\t\t\t\tfactor = float(0.5f)*normal.y;\n\t\t\t\tflux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.y+flux_contribution_i_density_energy.y);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.y + flux_contribution_i_momentum_x.y);\n\t\t\t\tflux_i_momentum.y += factor*(ff_flux_contribution_momentum_y.y + flux_contribution_i_momentum_y.y);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.y + flux_contribution_i_momentum_z.y);\n\n\t\t\t\tfactor = float(0.5f)*normal.z;\n\t\t\t\tflux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.z+flux_contribution_i_density_energy.z);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.z + flux_contribution_i_momentum_x.z);\n\t\t\t\tflux_i_momentum.y += factor*(ff_flux_contribution_momentum_y.z + flux_contribution_i_momentum_y.z);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.z + flux_contribution_i_momentum_z.z);\n\n\t\t\t}\n }\n\t\tfluxes[i + VAR_DENSITY*nelr] = flux_i_density;\n\t\tfluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;\n\t\tfluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;\n\t\tfluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;\n\t\tfluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;\n \n\t}\n } #pragma omp parallel for default(shared) schedule(auto)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(auto)", "context_chars": 100, "text": "(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)\n{\n for(int blk = 0; blk < nelr/block_length; ++blk)\n {\n int b_start = blk*block_length;\n int b_end = (blk+1)*block_length > nelr ? 
nelr : (blk+1)*block_length;\n #pragma omp simd\n for(int i = b_start; i < b_end; ++i)\n {\n float factor = step_factors[i]/float(RK+1-j);\n\n variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];\n variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];\n variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];\n variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];\n variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];\n\n }\n } #pragma omp parallel for default(shared) schedule(auto)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu_double.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "d dealloc(T* array)\n{\n\tdelete[] array;\n}\n\ntemplate \nvoid copy(T* dst, T* src, int N)\n{\n\tfor(int i = 0; i < N; i++)\n\t{\n\t\tdst[i] = src[i];\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu_double.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "e3 ff_flux_contribution_density_energy;\n\n\nvoid initialize_variables(int nelr, double* variables)\n{\n\tfor(int i = 0; i < nelr; i++)\n\t{\n\t\tfor(int j = 0; j < NVAR; j++) variables[i*NVAR + j] = ff_variable[j];\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu_double.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "\n}\n\n\n\nvoid compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)\n{\n\tfor(int i = 0; i < nelr; i++)\n\t{\n\t\tdouble density = variables[NVAR*i + VAR_DENSITY];\n\n\t\tdouble3 momentum;\n\t\tmomentum.x = variables[NVAR*i + (VAR_MOMENTUM+0)];\n\t\tmomentum.y = variables[NVAR*i + (VAR_MOMENTUM+1)];\n\t\tmomentum.z = variables[NVAR*i + (VAR_MOMENTUM+2)];\n\n\t\tdouble density_energy = variables[NVAR*i + VAR_DENSITY_ENERGY];\n\t\tdouble3 velocity;\t compute_velocity(density, momentum, velocity);\n\t\tdouble speed_sqd = compute_speed_sqd(velocity);\n\t\tdouble pressure = compute_pressure(density, density_energy, speed_sqd);\n\t\tdouble speed_of_sound = compute_speed_of_sound(density, pressure);\n\n\t\t// dt = double(0.5) * std::sqrt(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once\n\t\tstep_factors[i] = double(0.5) / (std::sqrt(areas[i]) * (std::sqrt(speed_sqd) + speed_of_sound));\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu_double.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "normals, double* variables, double* fluxes)\n{\n\tconst double smoothing_coefficient = double(0.2f);\n\n\tfor(int i = 0; i < nelr; i++)\n\t{\n\t\tint j, nb;\n\t\tdouble3 normal; double normal_len;\n\t\tdouble factor;\n\n\t\tdouble density_i = variables[NVAR*i + VAR_DENSITY];\n\t\tdouble3 momentum_i;\n\t\tmomentum_i.x = variables[NVAR*i + (VAR_MOMENTUM+0)];\n\t\tmomentum_i.y = variables[NVAR*i + (VAR_MOMENTUM+1)];\n\t\tmomentum_i.z = variables[NVAR*i + (VAR_MOMENTUM+2)];\n\n\t\tdouble density_energy_i = variables[NVAR*i + VAR_DENSITY_ENERGY];\n\n\t\tdouble3 velocity_i; \t\t\t\t compute_velocity(density_i, momentum_i, velocity_i);\n\t\tdouble speed_sqd_i = compute_speed_sqd(velocity_i);\n\t\tdouble speed_i = std::sqrt(speed_sqd_i);\n\t\tdouble pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);\n\t\tdouble speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);\n\t\tdouble3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;\n\t\tdouble3 flux_contribution_i_density_energy;\n\t\tcompute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);\n\n\t\tdouble flux_i_density = double(0.0);\n\t\tdouble3 flux_i_momentum;\n\t\tflux_i_momentum.x = double(0.0);\n\t\tflux_i_momentum.y = double(0.0);\n\t\tflux_i_momentum.z = double(0.0);\n\t\tdouble flux_i_density_energy = double(0.0);\n\n\t\tdouble3 velocity_nb;\n\t\tdouble density_nb, density_energy_nb;\n\t\tdouble3 momentum_nb;\n\t\tdouble3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;\n\t\tdouble3 flux_contribution_nb_density_energy;\n\t\tdouble speed_sqd_nb, speed_of_sound_nb, pressure_nb;\n\n\t\tfor(j = 0; j < NNB; j++)\n\t\t{\n\t\t\tnb = elements_surrounding_elements[i*NNB + j];\n\t\t\tnormal.x = normals[(i*NNB + j)*NDIM + 0];\n\t\t\tnormal.y = normals[(i*NNB + j)*NDIM + 1];\n\t\t\tnormal.z = normals[(i*NNB + j)*NDIM + 2];\n\t\t\tnormal_len = std::sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);\n\n\t\t\tif(nb >= 0) \t// a legitimate neighbor\n\t\t\t{\n\t\t\t\tdensity_nb = variables[nb*NVAR + VAR_DENSITY];\n\t\t\t\tmomentum_nb.x = variables[nb*NVAR + (VAR_MOMENTUM+0)];\n\t\t\t\tmomentum_nb.y = variables[nb*NVAR + (VAR_MOMENTUM+1)];\n\t\t\t\tmomentum_nb.z = variables[nb*NVAR + (VAR_MOMENTUM+2)];\n\t\t\t\tdensity_energy_nb = variables[nb*NVAR + VAR_DENSITY_ENERGY];\n\t\t\t\t\t\t\t\t\t\t\t\t\tcompute_velocity(density_nb, momentum_nb, velocity_nb);\n\t\t\t\tspeed_sqd_nb = compute_speed_sqd(velocity_nb);\n\t\t\t\tpressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);\n\t\t\t\tspeed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);\n\t\t\t\t\t\t\t\t\t\t\t\t\tcompute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, 
flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);\n\n\t\t\t\t// artificial viscosity\n\t\t\t\tfactor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + std::sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);\n\t\t\t\tflux_i_density += factor*(density_i-density_nb);\n\t\t\t\tflux_i_density_energy += factor*(density_energy_i-density_energy_nb);\n\t\t\t\tflux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);\n\t\t\t\tflux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);\n\t\t\t\tflux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);\n\n\t\t\t\t// accumulate cell-centered fluxes\n\t\t\t\tfactor = double(0.5)*normal.x;\n\t\t\t\tflux_i_density += factor*(momentum_nb.x+momentum_i.x);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);\n\n\t\t\t\tfactor = double(0.5)*normal.y;\n\t\t\t\tflux_i_density += factor*(momentum_nb.y+momentum_i.y);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);\n\n\t\t\t\tfactor = double(0.5)*normal.z;\n\t\t\t\tflux_i_density += factor*(momentum_nb.z+momentum_i.z);\n\t\t\t\tflux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);\n\t\t\t\tflux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);\n\t\t\t\tflux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);\n\t\t\t\tflux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);\n\t\t\t}\n\t\t\telse if(nb == -1)\t// a wing boundary\n\t\t\t{\n\t\t\t\tflux_i_momentum.x += normal.x*pressure_i;\n\t\t\t\tflux_i_momentum.y += normal.y*pressure_i;\n\t\t\t\tflux_i_momentum.z += normal.z*pressure_i;\n\t\t\t}\n\t\t\telse if(nb == -2) // a far field boundary\n\t\t\t{\n\t\t\t\tfactor = double(0.5)*normal.x;\n\t\t\t\tflux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.x+flux_contribution_i_density_energy.x);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.x + flux_contribution_i_momentum_x.x);\n\t\t\t\tflux_i_momentum.y += factor*(ff_flux_contribution_momentum_y.x + flux_contribution_i_momentum_y.x);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.x + flux_contribution_i_momentum_z.x);\n\n\t\t\t\tfactor = double(0.5)*normal.y;\n\t\t\t\tflux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.y+flux_contribution_i_density_energy.y);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.y + flux_contribution_i_momentum_x.y);\n\t\t\t\tflux_i_momentum.y += 
factor*(ff_flux_contribution_momentum_y.y + flux_contribution_i_momentum_y.y);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.y + flux_contribution_i_momentum_z.y);\n\n\t\t\t\tfactor = double(0.5)*normal.z;\n\t\t\t\tflux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);\n\t\t\t\tflux_i_density_energy += factor*(ff_flux_contribution_density_energy.z+flux_contribution_i_density_energy.z);\n\t\t\t\tflux_i_momentum.x += factor*(ff_flux_contribution_momentum_x.z + flux_contribution_i_momentum_x.z);\n\t\t\t\tflux_i_momentum.y += factor*(ff_flux_contribution_momentum_y.z + flux_contribution_i_momentum_y.z);\n\t\t\t\tflux_i_momentum.z += factor*(ff_flux_contribution_momentum_z.z + flux_contribution_i_momentum_z.z);\n\n\t\t\t}\n\t\t}\n\n\t\tfluxes[i*NVAR + VAR_DENSITY] = flux_i_density;\n\t\tfluxes[i*NVAR + (VAR_MOMENTUM+0)] = flux_i_momentum.x;\n\t\tfluxes[i*NVAR + (VAR_MOMENTUM+1)] = flux_i_momentum.y;\n\t\tfluxes[i*NVAR + (VAR_MOMENTUM+2)] = flux_i_momentum.z;\n\t\tfluxes[i*NVAR + VAR_DENSITY_ENERGY] = flux_i_density_energy;\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/cfd/euler3d_cpu_double.cpp", "omp_pragma_line": "#pragma omp parallel for default(shared) schedule(static)", "context_chars": 100, "text": "int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)\n{\n\tfor(int i = 0; i < nelr; i++)\n\t{\n\t\tdouble factor = step_factors[i]/double(RK+1-j);\n\n\t\tvariables[NVAR*i + VAR_DENSITY] = old_variables[NVAR*i + VAR_DENSITY] + factor*fluxes[NVAR*i + VAR_DENSITY];\n\t\tvariables[NVAR*i + VAR_DENSITY_ENERGY] = old_variables[NVAR*i + VAR_DENSITY_ENERGY] + factor*fluxes[NVAR*i + VAR_DENSITY_ENERGY];\n\t\tvariables[NVAR*i + (VAR_MOMENTUM+0)] = old_variables[NVAR*i + (VAR_MOMENTUM+0)] + factor*fluxes[NVAR*i + (VAR_MOMENTUM+0)];\n\t\tvariables[NVAR*i + (VAR_MOMENTUM+1)] = old_variables[NVAR*i + (VAR_MOMENTUM+1)] + factor*fluxes[NVAR*i + (VAR_MOMENTUM+1)];\n\t\tvariables[NVAR*i + (VAR_MOMENTUM+2)] = old_variables[NVAR*i + (VAR_MOMENTUM+2)] + factor*fluxes[NVAR*i + (VAR_MOMENTUM+2)];\n\t} #pragma omp parallel for default(shared) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/hotspot/hotspot_openmp.cpp", "omp_pragma_line": "#pragma omp parallel for shared(power, temp, result) private(chunk, r, c, delta) firstprivate(row, col, num_chunk, chunks_in_row) schedule(static)", "context_chars": 100, "text": "LOCK_SIZE_R;\n\n#ifdef OPEN\n #ifndef __MIC__\n\tomp_set_num_threads(num_omp_threads);\n #endif\n for ( chunk = 0; chunk < num_chunk; ++chunk )\n {\n int r_start = BLOCK_SIZE_R*(chunk/chunks_in_col);\n int c_start = BLOCK_SIZE_C*(chunk%chunks_in_row); \n int r_end = r_start + BLOCK_SIZE_R > row ? row : r_start + BLOCK_SIZE_R;\n int c_end = c_start + BLOCK_SIZE_C > col ? 
col : c_start + BLOCK_SIZE_C;\n \n if ( r_start == 0 || c_start == 0 || r_end == row || c_end == col )\n {\n for ( r = r_start; r < r_start + BLOCK_SIZE_R; ++r ) {\n for ( c = c_start; c < c_start + BLOCK_SIZE_C; ++c ) {\n /* Corner 1 */\n if ( (r == 0) && (c == 0) ) {\n delta = (Cap_1) * (power[0] +\n (temp[1] - temp[0]) * Rx_1 +\n (temp[col] - temp[0]) * Ry_1 +\n (amb_temp - temp[0]) * Rz_1);\n }\t/* Corner 2 */\n else if ((r == 0) && (c == col-1)) {\n delta = (Cap_1) * (power[c] +\n (temp[c-1] - temp[c]) * Rx_1 +\n (temp[c+col] - temp[c]) * Ry_1 +\n ( amb_temp - temp[c]) * Rz_1);\n }\t/* Corner 3 */\n else if ((r == row-1) && (c == col-1)) {\n delta = (Cap_1) * (power[r*col+c] + \n (temp[r*col+c-1] - temp[r*col+c]) * Rx_1 + \n (temp[(r-1)*col+c] - temp[r*col+c]) * Ry_1 + \n ( amb_temp - temp[r*col+c]) * Rz_1);\t\t\t\t\t\n }\t/* Corner 4\t*/\n else if ((r == row-1) && (c == 0)) {\n delta = (Cap_1) * (power[r*col] + \n (temp[r*col+1] - temp[r*col]) * Rx_1 + \n (temp[(r-1)*col] - temp[r*col]) * Ry_1 + \n (amb_temp - temp[r*col]) * Rz_1);\n }\t/* Edge 1 */\n else if (r == 0) {\n delta = (Cap_1) * (power[c] + \n (temp[c+1] + temp[c-1] - 2.0*temp[c]) * Rx_1 + \n (temp[col+c] - temp[c]) * Ry_1 + \n (amb_temp - temp[c]) * Rz_1);\n }\t/* Edge 2 */\n else if (c == col-1) {\n delta = (Cap_1) * (power[r*col+c] + \n (temp[(r+1)*col+c] + temp[(r-1)*col+c] - 2.0*temp[r*col+c]) * Ry_1 + \n (temp[r*col+c-1] - temp[r*col+c]) * Rx_1 + \n (amb_temp - temp[r*col+c]) * Rz_1);\n }\t/* Edge 3 */\n else if (r == row-1) {\n delta = (Cap_1) * (power[r*col+c] + \n (temp[r*col+c+1] + temp[r*col+c-1] - 2.0*temp[r*col+c]) * Rx_1 + \n (temp[(r-1)*col+c] - temp[r*col+c]) * Ry_1 + \n (amb_temp - temp[r*col+c]) * Rz_1);\n }\t/* Edge 4 */\n else if (c == 0) {\n delta = (Cap_1) * (power[r*col] + \n (temp[(r+1)*col] + temp[(r-1)*col] - 2.0*temp[r*col]) * Ry_1 + \n (temp[r*col+1] - temp[r*col]) * Rx_1 + \n (amb_temp - temp[r*col]) * Rz_1);\n }\n result[r*col+c] =temp[r*col+c]+ delta;\n }\n }\n continue;\n }\n\n for ( r = r_start; r < r_start + BLOCK_SIZE_R; ++r ) {\n#pragma omp simd \n for ( c = c_start; c < c_start + BLOCK_SIZE_C; ++c ) {\n /* Update Temperatures */\n result[r*col+c] =temp[r*col+c]+ \n ( Cap_1 * (power[r*col+c] + \n (temp[(r+1)*col+c] + temp[(r-1)*col+c] - 2.f*temp[r*col+c]) * Ry_1 + \n (temp[r*col+c+1] + temp[r*col+c-1] - 2.f*temp[r*col+c]) * Rx_1 + \n (amb_temp - temp[r*col+c]) * Rz_1));\n }\n }\n } #pragma omp parallel for shared(power, temp, result) private(chunk, r, c, delta) firstprivate(row, col, num_chunk, chunks_in_row) schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/rodinia_3.1/openmp/heartwall/main.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "====================================================\n\n\t\tomp_set_num_threads(omp_num_threads);\n\t\t\n\n\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "---------------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t} #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)"} 
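The CG record that closes just above, and the one that follows, both rely on a single omp parallel for with a combined sum reduction over two scalars (norm_temp11, norm_temp12) rather than a critical section. Below is a minimal, self-contained C sketch of that pattern, not an excerpt from cg.c: the array contents, the length N, and the main() wrapper are illustrative assumptions.

    #include <stdio.h>

    int main(void)
    {
        enum { N = 1000 };
        double x[N], z[N];
        double norm_temp11 = 0.0, norm_temp12 = 0.0;
        int j;

        for (j = 0; j < N; j++) {        /* illustrative test data only */
            x[j] = 1.0;
            z[j] = 2.0;
        }

    #pragma omp parallel for default(shared) private(j) \
        reduction(+ : norm_temp11, norm_temp12)
        for (j = 0; j < N; j++) {
            norm_temp11 += x[j] * z[j];  /* accumulates x . z */
            norm_temp12 += z[j] * z[j];  /* accumulates z . z */
        }

        printf("x.z = %f, z.z = %f\n", norm_temp11, norm_temp12);
        return 0;
    }

Compiled with OpenMP enabled (e.g. -fopenmp), each thread keeps private partial sums for both scalars and the runtime combines them at the end of the loop; without OpenMP the pragma is ignored and the loop runs serially with the same result.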
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "-\nc Normalize z to obtain x\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n x[j] = norm_temp12*z[j];\n\t} #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": "ing vector to (1, 1, .... 1)\nc-------------------------------------------------------------------*/\nfor (i = 1; i <= NA+1; i++) {\n x[i] = 1.0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)", "context_chars": 100, "text": "--------------------------------------------------------*/\n\tnorm_temp11 = 0.0;\n\tnorm_temp12 = 0.0;\n\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n norm_temp11 = norm_temp11 + x[j]*z[j];\n norm_temp12 = norm_temp12 + z[j]*z[j];\n\t} #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "-\nc Normalize z to obtain x\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= lastcol-firstcol+1; j++) {\n x[j] = norm_temp12*z[j];\n\t} #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": " to mark nonzero positions\nc---------------------------------------------------------------------*/\nfor (i = 1; i <= n; i++) {\n\tcolidx[n+i] = 0;\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j)", "context_chars": 100, "text": "umber of triples in each row\nc-------------------------------------------------------------------*/\nfor (j = 1; j <= n; j++) {\n\trowstr[j] = 0;\n\tmark[j] = FALSE;\n } #pragma omp parallel for default(shared) private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(k,j)", "context_chars": 100, "text": " ... 
preload data pages\nc---------------------------------------------------------------------*/\nfor(j = 0;j <= nrows-1;j++) {\n for(k = rowstr[j];k <= rowstr[j+1]-1;k++)\n\t a[k] = 0.0;\n } #pragma omp parallel for default(shared) private(k,j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/CG/cg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) ", "context_chars": 100, "text": "adding elements\nc-------------------------------------------------------------------*/\n nza = 0;\nfor (i = 1; i <= n; i++) {\n\tx[i] = 0.0;\n\tmark[i] = FALSE;\n } #pragma omp parallel for default(shared) private(i) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k) ", "context_chars": 100, "text": "ier space\nc-------------------------------------------------------------------*/\n\n int i, j, k;\n\nfor (k = 0; k < d[2]; k++) {\n\tfor (j = 0; j < d[1]; j++) {\n for (i = 0; i < d[0]; i++) {\n\t crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i,j,k) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/FT/ft.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) ", "context_chars": 100, "text": "ck:\nc mod(i-1+n/2, n) - n/2\nc-------------------------------------------------------------------*/\n\nfor (i = 0; i < dims[2][0]; i++) {\n\tii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;\n\tii2 = ii*ii;\n\tfor (j = 0; j < dims[2][1]; j++) {\n jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;\n ij2 = jj*jj+ii2;\n for (k = 0; k < dims[2][2]; k++) {\n\t\tkk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;\n\t\tindexmap[k][j][i] = kk*kk+ij2;\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "--------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n \n\tt1 = bt * r3;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = -r2;\n\trhs[1][i][j][k] = r1;\n\trhs[2][i][j][k] = bt * ( r4 - r5 );\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/SP/sp.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)", "context_chars": 100, "text": "-------------------------------------------*/\n\n int i, j, k;\n double r1, r2, r3, r4, r5, t1, t2;\n\nfor (i = 1; i <= grid_points[0]-2; i++) {\n for (j = 1; j <= grid_points[1]-2; j++) {\n for (k = 1; k <= grid_points[2]-2; k++) {\n\n\tr1 = rhs[0][i][j][k];\n\tr2 = rhs[1][i][j][k];\n\tr3 = rhs[2][i][j][k];\n\tr4 = 
rhs[3][i][j][k];\n\tr5 = rhs[4][i][j][k];\n\n\tt1 = bt * r1;\n\tt2 = 0.5 * ( r4 + r5 );\n\n\trhs[0][i][j][k] = bt * ( r4 - r5 );\n\trhs[1][i][j][k] = -r3;\n\trhs[2][i][j][k] = r2;\n\trhs[3][i][j][k] = -t1 + t2;\n\trhs[4][i][j][k] = t1 + t2;\n }\n }\n } #pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/EP/ep.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i)", "context_chars": 100, "text": " code.\n*/\n vranlc(0, &(dum[0]), dum[1], &(dum[2]));\n dum[0] = randlc(&(dum[1]), dum[2]);\n \nfor (i = 0; i < 2*NK; i++) x[i] = -1.0e99;\n \n Mops = log(sqrt(fabs(max(1.0, 1.0))));\n\n timer_clear(1);\n timer_clear(2);\n timer_clear(3);\n timer_start(1);\n\n vranlc(0, &t1, A, x);\n\n/* Compute AN = A ^ (2 * NK) (mod 2^46). */\n\n t1 = A;\n\n for ( i = 1; i <= MK+1; i++) {\n\tt2 = randlc(&t1, t1);\n } #pragma omp parallel for default(shared) private(i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) ", "context_chars": 100, "text": "---------------------------------------------------*/\n\n int i3, i2, i1;\n double r1[M], r2[M];\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tr1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]\n\t\t + r[i3-1][i2][i1] + r[i3+1][i2][i1];\n\t\tr2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]\n\t\t + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];\n\t }\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\tu[i3][i2][i1] = u[i3][i2][i1]\n\t\t + c[0] * r[i3][i2][i1]\n\t\t + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]\n\t\t\t + r1[i1] )\n\t\t + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );\n/*--------------------------------------------------------------------\nc Assume c(3) = 0 (Enable line below if c(3) not= 0)\nc---------------------------------------------------------------------\nc > + c(3) * ( r2(i1-1) + r2(i1+1) )\nc-------------------------------------------------------------------*/\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)", "context_chars": 100, "text": "---------------------------------------------------*/\n\n int i3, i2, i1;\n double u1[M], u2[M];\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tu1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]\n\t\t + u[i3-1][i2][i1] + u[i3+1][i2][i1];\n\t\tu2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]\n\t\t + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];\n\t }\n\t for (i1 = 1; i1 < n1-1; i1++) {\n\t\tr[i3][i2][i1] = v[i3][i2][i1]\n\t\t - a[0] * u[i3][i2][i1]\n/*--------------------------------------------------------------------\nc Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)\nc---------------------------------------------------------------------\nc > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)\nc > + u1(i1) )\nc-------------------------------------------------------------------*/\n\t\t- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )\n\t\t - a[3] * ( u2[i1-1] + u2[i1+1] );\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)", "context_chars": 100, "text": "lse {\n d2 = 1;\n }\n\n if (m3k == 3) {\n d3 = 2;\n } else {\n d3 = 1;\n }\nfor (j3 = 1; j3 < m3j-1; j3++) {\n\ti3 = 2*j3-d3;\n/*C i3 = 2*j3-1*/\n\tfor (j2 = 1; j2 < m2j-1; j2++) {\n i2 = 2*j2-d2;\n/*C i2 = 2*j2-1*/\n\n for (j1 = 1; j1 < m1j; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\tx1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]\n\t\t + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];\n\t\ty1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]\n\t\t + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];\n\t }\n\n for (j1 = 1; j1 < m1j-1; j1++) {\n\t\ti1 = 2*j1-d1;\n/*C i1 = 2*j1-1*/\n\t\ty2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]\n\t\t + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];\n\t\tx2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]\n\t\t + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];\n\t\ts[j3][j2][j1] =\n\t\t 0.5 * r[i3+1][i2+1][i1+1]\n\t\t + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)\n\t\t + 0.125 * ( x1[i1] + x1[i1+2] + y2)\n\t\t + 0.0625 * ( y1[i1] + y1[i1+2] );\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)", "context_chars": 100, "text": " parameter( m=535 )\n*/\n double z1[M], z2[M], z3[M];\n\n if ( n1 != 3 && n2 != 3 && n3 != 3 ) {\nfor (i3 = 0; i3 < mm3-1; i3++) {\n for (i2 = 0; i2 < mm2-1; i2++) {\n\t\tfor (i1 = 0; i1 < mm1; i1++) {\n\t\t z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];\n\t\t z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];\n\t\t z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]\n\t\t\t+z[i3][i2][i1];\n\t\t u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]\n\t\t\t+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]\n\t\t\t+0.5 * z1[i1];\n\t\t u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]\n\t\t\t+0.25*( z1[i1] + z1[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]\n\t\t\t+0.5 * z2[i1];\n\t\t u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]\n\t\t\t+0.25*( z2[i1] + z2[i1+1] );\n\t\t}\n\t\tfor (i1 = 0; i1 < mm1-1; i1++) {\n\t\t u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]\n\t\t\t+0.25* z3[i1];\n\t\t u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]\n\t\t\t+0.125*( z3[i1] + z3[i1+1] );\n\t\t}\n\t }\n\t} #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)", "context_chars": 100, "text": "--*/\n\n double s = 0.0;\n int i3, i2, i1, n;\n double a = 0.0, tmp = 0.0;\n\n n = nx*ny*nz;\n\nfor (i3 = 1; i3 < n3-1; i3++) {\n\tfor (i2 = 1; i2 < n2-1; i2++) {\n for (i1 = 1; i1 < n1-1; i1++) {\n\t\ts = s + r[i3][i2][i1] * r[i3][i2][i1];\n\t\ta = fabs(r[i3][i2][i1]);\n\t\tif (a > tmp) tmp = a;\n\t }\n\t}\n } #pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i2, i1) ", "context_chars": 100, "text": "ber\\n\");\n for (i = MM-1; i >= 0; i--) {\n\tprintf(\" %4d\", jg[0][i][1]);\n }\n printf(\"\\n\");*/\n\nfor (i3 = 0; i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i2, i1) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/original/NPB3.0-omp-c/MG/mg.c", "omp_pragma_line": "#pragma omp parallel for private(i1,i2,i3)", "context_chars": 100, "text": "-------\nc-------------------------------------------------------------------*/\n\n int i1, i2, i3;\nfor (i3 = 0;i3 < n3; i3++) {\n\tfor (i2 = 0; i2 < n2; i2++) {\n for (i1 = 0; i1 < n1; i1++) {\n\t\tz[i3][i2][i1] = 0.0;\n\t }\n\t}\n } #pragma omp parallel for private(i1,i2,i3)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "ain(int argc, char* argv[])\n{\n int i;\n int ret;\n FILE* pfile;\n int len=1000;\n\n int A[1000];\n\n for (i=0; i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB049-fprintf-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd ordered", "context_chars": 100, "text": "en(\"mytempfile.txt\",\"a+\");\n if (pfile ==NULL)\n {\n fprintf(stderr,\"Error in fopen()\\n\");\n }\n\n for (i=0; i #pragma omp parallel for simd ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "rgv[])\n{\n int i;\n int tmp;\n int len=100;\n if (argc>1)\n len = atoi(argv[1]);\n int a[len];\n\n for (i=0;i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB020-privatemissing-var-yes.c", "omp_pragma_line": "#pragma omp parallel for simd private(tmp)", "context_chars": 100, "text": " atoi(argv[1]);\n int a[len];\n\n #pragma omp parallel for simd\n for (i=0;ifor (i=0;i #pragma omp parallel for simd private(tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB050-functionparameter-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "i] = volnew_o8;\n } \n}\n\nint main()\n{\n double o1[101];\n double c[101];\n int i;\n int len = 100;\n for (i = 0; i < len; ++i) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "Arrays passed as function parameters\n*/\nvoid foo1(double o1[], double c[], int len)\n{ \n int i ;\n\n for (i = 0; i < len; ++i) {\n double volnew_o8 = 0.5 * c[i];\n o1[i] = volnew_o8;\n } #pragma omp parallel for 
simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB050-functionparameter-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "o1[i] = volnew_o8;\n } \n}\n\ndouble o1[100];\ndouble c[100];\n\nint main()\n{\n int i;\n int len = 100;\n for (i = 0; i < len; ++i) {\n c[i] = i + 1.01;\n o1[i] = i + 1.01;\n } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "ude \nint main(int argc, char* argv[])\n{\n int i;\n int tmp;\n int len=100;\n int a[100];\n for (i=0;i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB028-privatemissing-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd private(tmp)", "context_chars": 100, "text": ";\n int len=100;\n int a[100];\n #pragma omp parallel for simd\n for (i=0;ifor (i=0;i #pragma omp parallel for simd private(tmp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd firstprivate (g)", "context_chars": 100, "text": "nclude \n\n/*\nExample use of firstprivate()\n*/\nvoid foo(int * a, int n, int g)\n{\n int i;\n for (i=0;i #pragma omp parallel for simd firstprivate (g)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": " for (i=0;ifor (i=0;i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB048-firstprivate-orig-no.c", "omp_pragma_line": "#pragma omp parallel for simd ordered", "context_chars": 100, "text": "00;\n #pragma omp parallel for simd\n for (i=0;ifor (i=0;i #pragma omp parallel for simd ordered"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "e \nint main(int argc,char *argv[])\n{\n int i, j;\n int len = 20; \n\n double a[20][20];\n\n for (i=0; i< len; i++)\n #pragma omp parallel for simd \n for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd ", "context_chars": 100, "text": "len = 20; \n\n double a[20][20];\n\n #pragma omp parallel for private(j)\n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for simd "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB003-antidep2-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd ", "context_chars": 100, "text": "r (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for simd "} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "* xa1 = base;\n double * xa2 = xa1 + 2014;\n int i;\n\n // initialize segments touched by indexSet\n for (i =521; i<= 2025; ++i)\n {\n base[i]=0.5*i;\n } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB005-indirectaccess1-orig-yes.c", "omp_pragma_line": "#pragma omp parallel for simd schedule(static,1)", "context_chars": 100, "text": "]=0.5*i;\n }\n// default static even scheduling may not trigger data race, using static,1 instead.\n for (i =0; i< N; ++i) \n {\n int idx = indexSet[i];\n xa1[idx]+= 1.0 + i;\n xa2[idx]+= 3.0 + i;\n } #pragma omp parallel for simd schedule(static,1)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for private(j)", "context_chars": 100, "text": "])\n{\n int i, j;\n int len = 20; \n\n if (argc>1)\n len = atoi(argv[1]);\n\n double a[len][len];\n\n for (i=0; i< len; i++)\n #pragma omp parallel for simd\n for (j=0; j #pragma omp parallel for private(j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "rgv[1]);\n\n double a[len][len];\n\n #pragma omp parallel for private(j)\n for (i=0; i< len; i++)\n for (j=0; j #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LLNL/AutoParBench/benchmarks/reference_cpu_simd/dataracebench/DRB004-antidep2-var-yes.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": " for simd\n for (j=0; jfor (j = 0; j < len ; j += 1) {\n a[i][j] += a[i + 1][j];\n } #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/RRZE-HPC/MD-Bench/src/clusterpair/force_eam.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " eam->nrho_tot;\n */\n double S = getTimeStamp();\n\n LIKWID_MARKER_START(\"force\");\n /*\n for(int i = 0; i < Nlocal; i++) {\n neighs = &neighbor->neighbors[i * neighbor->maxneighs];\n int numneighs = neighbor->numneigh[i];\n MD_FLOAT xtmp = atom_x(i);\n MD_FLOAT ytmp = atom_y(i);\n MD_FLOAT ztmp = atom_z(i);\n MD_FLOAT rhoi = 0;\n#ifdef EXPLICIT_TYPES\n const int type_i = atom->type[i];\n\n #pragma ivdep\n for(int k = 0; k < numneighs; k++) {\n int j = neighs[k];\n MD_FLOAT delx = xtmp - atom_x(j);\n MD_FLOAT dely = ytmp - atom_y(j);\n MD_FLOAT delz = ztmp - atom_z(j);\n MD_FLOAT rsq = delx * delx + dely * dely + delz * delz;\n#ifdef EXPLICIT_TYPES\n const int type_j = atom->type[j];\n const int type_ij = type_i * ntypes + type_j;\n const MD_FLOAT cutforcesq = atom->cutforcesq[type_ij];\n#else\n const MD_FLOAT cutforcesq = param->cutforce * param->cutforce;\n\n if(rsq < cutforcesq) {\n MD_FLOAT p = sqrt(rsq) * rdr + 1.0;\n int m = (int)(p);\n m = m < nr - 1 ? m : nr - 1;\n p -= m;\n p = p < 1.0 ? 
p : 1.0;\n#ifdef EXPLICIT_TYPES\n rhoi += ((rhor_spline[type_ij * nr_tot + m * 7 + 3] * p +\n rhor_spline[type_ij * nr_tot + m * 7 + 4]) * p +\n rhor_spline[type_ij * nr_tot + m * 7 + 5]) * p +\n rhor_spline[type_ij * nr_tot + m * 7 + 6];\n#else\n rhoi += ((rhor_spline[m * 7 + 3] * p +\n rhor_spline[m * 7 + 4]) * p +\n rhor_spline[m * 7 + 5]) * p +\n rhor_spline[m * 7 + 6];\n\n }\n }\n\n#ifdef EXPLICIT_TYPES\n const int type_ii = type_i * type_i;\n\n MD_FLOAT p = 1.0 * rhoi * rdrho + 1.0;\n int m = (int)(p);\n m = MAX(1, MIN(m, nrho - 1));\n p -= m;\n p = MIN(p, 1.0);\n#ifdef EXPLICIT_TYPES\n fp[i] = (frho_spline[type_ii * nrho_tot + m * 7 + 0] * p +\n frho_spline[type_ii * nrho_tot + m * 7 + 1]) * p +\n frho_spline[type_ii * nrho_tot + m * 7 + 2];\n#else\n fp[i] = (frho_spline[m * 7 + 0] * p + frho_spline[m * 7 + 1]) * p + frho_spline[m\n* 7 + 2]; \n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/imsure/parallel-programming/matrix-multiplication/matrix-mul-openmp.c", "omp_pragma_line": "#pragma omp parallel for shared(matrix1, matrix2, matrix3, chunksize) \\", "context_chars": 100, "text": "rintf( \"Matrix 2:\\n\" );\n print_matrix( matrix2, size );\n }\n\n gettimeofday( &tstart, NULL );\n \nprivate(i,j,k,sum) schedule(static, chunksize)\n for (i = 0; i < size; ++i) { // hold row index of 'matrix1'\n for (j = 0; j < size; ++j) { // hold column index of 'matrix2'\n sum = 0; // hold value of a cell\n /* one pass to sum the multiplications of corresponding cells\n\t in the row vector and column vector. */\n for (k = 0; k < size; ++k) { \n\tsum += matrix1[ i ][ k ] * matrix2[ k ][ j ];\n }\n matrix3[ i ][ j ] = sum;\n }\n } #pragma omp parallel for shared(matrix1, matrix2, matrix3, chunksize) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/imsure/parallel-programming/red-black-grid-computation/rb-grid-hybrid.c", "omp_pragma_line": "#pragma omp parallel for shared(grid,num_threads) private(i,j,jstart) schedule (static, chunk_size)", "context_chars": 100, "text": " compute_grid_red( double **grid, int gridsize, int strip_size, int myrank )\n{\n int i, j, jstart;\n\nfor (i = 1; i < strip_size-1; i++) {\n if (i % 2 == 1) jstart = 1; // odd row\n else jstart = 2; // even row\n \n for (j = jstart; j < gridsize-1; j += 2) {\n grid[ i ][ j ] = ( grid[ i-1 ][ j ] + grid[ i+1 ][ j ] +\n\t\t\t grid[ i ][ j-1 ] + grid[ i ][ j+1 ] ) * 0.25;\n }\n } #pragma omp parallel for shared(grid,num_threads) private(i,j,jstart) schedule (static, chunk_size)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/imsure/parallel-programming/red-black-grid-computation/rb-grid-hybrid.c", "omp_pragma_line": "#pragma omp parallel for shared(grid,num_threads) private(i,j,jstart) schedule (static, chunk_size)", "context_chars": 100, "text": "ompute_grid_black( double **grid, int gridsize, int strip_size, int myrank )\n{\n int i, j, jstart;\n\nfor (i = 1; i < strip_size-1; i++) {\n if (i % 2 == 1) jstart = 2; // odd row\n else jstart = 1; // even row\n \n for (j = jstart; j < gridsize-1; j += 2) {\n grid[ i ][ j ] = ( grid[ i-1 ][ j ] + grid[ i+1 ][ j ] +\n\t\t\t grid[ i ][ j-1 ] + grid[ i ][ j+1 ] ) * 0.25;\n }\n } #pragma omp parallel for shared(grid,num_threads) private(i,j,jstart) schedule (static, chunk_size)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/2. 
K-means with OpenMP/kmeans2/kmeans2.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_dist) ", "context_chars": 100, "text": "n\");\n\t}\n}\n\n//returns the total minimum distance\nfloat estimate_class(void) {\n\tfloat tot_dist = 0.0;\nfor(int i=0;i #pragma omp parallel for reduction(+:tot_dist) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/2. K-means with OpenMP/kmeans3/kmeans3.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_dist) schedule(static,50)", "context_chars": 100, "text": "n\");\n\t}\n}\n\n//returns the total minimum distance\nfloat estimate_class(void) {\n\tfloat tot_dist = 0.0;\n//#pragma omp simd\n\tfor(int i=0;i #pragma omp parallel for reduction(+:tot_dist) schedule(static,50)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/2. K-means with OpenMP/kmeans4/kmeans4.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_dist) schedule(static,50)", "context_chars": 100, "text": "n\");\n\t}\n}\n\n//returns the total minimum distance\nfloat estimate_class(void) {\n\tfloat tot_dist = 0.0;\nfor(int i=0;i #pragma omp parallel for reduction(+:tot_dist) schedule(static,50)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.0.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city) private(dist)", "context_chars": 100, "text": "ity.dist = 1e6;\n\tnearest_city.city = -1;\n\t\n\tprintf(\"\\n\\nVISIT CLOSEST FROM: %d\\n\\n\", to_visit-1);\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.3.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city) private(dist)", "context_chars": 100, "text": "initialize nearest_city\n\tnearest nearest_city;\n\tnearest_city.dist = 1e6;\n\tnearest_city.city = -1;\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.3.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city1) private(dist)", "context_chars": 100, "text": "_city1.dist = 1e6;\n\tnearest_city1.city = -1;\n\tnearest_city2.dist = 1e6;\n\tnearest_city2.city = -1;\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city1) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.3.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city2) private(dist)", "context_chars": 100, "text": "ed[nearest_city1.city] = true;\n\t\treturn nearest_city1.dist;\n\t}else{\n\n\t\t//find second nearest city\n\t\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city2) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. 
Travelling salesman problem/travelling_salesman5/travelling_salesman5.2.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city) private(dist)", "context_chars": 100, "text": "initialize nearest_city\n\tnearest nearest_city;\n\tnearest_city.dist = 1e6;\n\tnearest_city.city = -1;\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.1.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city) private(dist)", "context_chars": 100, "text": "ity.dist = 1e6;\n\tnearest_city.city = -1;\n\t\n\tprintf(\"\\n\\nVISIT CLOSEST FROM: %d\\n\\n\", to_visit-1);\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.1.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city1) private(dist)", "context_chars": 100, "text": "t = 1e6;\n\tnearest_city2.city = -1;\n\n\tprintf(\"\\n\\nVISIT SECOND CLOSEST FROM: %d\\n\\n\", to_visit-1);\n\n\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city1) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman5/travelling_salesman5.1.c", "omp_pragma_line": "#pragma omp parallel for reduction(my_min:nearest_city2) private(dist)", "context_chars": 100, "text": "ed[nearest_city1.city] = true;\n\t\treturn nearest_city1.dist;\n\t}else{\n\n\t\t//find second nearest city\n\t\tfor(int i=1;i #pragma omp parallel for reduction(my_min:nearest_city2) private(dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman2/travelling_salesman2.1.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,2)", "context_chars": 100, "text": ";\n\tswapped_indices[1] = ind2;\n}\n\nvoid calc_adjacent_dists(int *swapped_indices, float *adj_dists){\n\tfor(int i=0;i<2;i++){\n\t\t*adj_dists++ = distance(swapped_indices[i], swapped_indices[i]-1);\n\t\t*adj_dists++ = distance(swapped_indices[i], swapped_indices[i]+1);\n\t} #pragma omp parallel for schedule(static,2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman2/travelling_salesman2.1.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(+:old_tot_dist) schedule(static,4)", "context_chars": 100, "text": " from the difference of only the 4 affected distances\n\t//instead of recalculating every distance\n\t\n\tfor(int i=0;i<4;i++){\n\t\told_tot_dist += new_dists[i] - prev_dists[i];\t\n\t} #pragma omp parallel for simd reduction(+:old_tot_dist) schedule(static,4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. 
Travelling salesman problem/travelling_salesman2/travelling_salesman2.1.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_dist)", "context_chars": 100, "text": "- prev_dists[i];\t\n\t}\n\treturn old_tot_dist;\n}\n\nfloat total_distance(void){\n\tfloat tot_dist = 0.0;\n\t\n\tfor (int i=0;i #pragma omp parallel for reduction(+:tot_dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman2/travelling_salesman2.0.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,2)", "context_chars": 100, "text": "\tswapped_indices[1] = ind2;\n}\n\nvoid calc_adjacent_dists(int *swapped_indices, float *adj_dists){\n\t//for(int i=0;i<2;i++){\n\t\t*adj_dists++ = distance(swapped_indices[i], swapped_indices[i]-1);\n\t\t*adj_dists++ = distance(swapped_indices[i], swapped_indices[i]+1);\n\t} #pragma omp parallel for schedule(static,2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman2/travelling_salesman2.0.c", "omp_pragma_line": "#pragma omp parallel for simd reduction(+:old_tot_dist) schedule(static,4)", "context_chars": 100, "text": "rom the difference of only the 4 affected distances\n\t//instead of recalculating every distance\n\t\n\t//for(int i=0;i<4;i++){\n\t\told_tot_dist += new_dists[i] - prev_dists[i];\t\n\t} #pragma omp parallel for simd reduction(+:old_tot_dist) schedule(static,4)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/3. Travelling salesman problem/travelling_salesman2/travelling_salesman2.0.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:tot_dist)", "context_chars": 100, "text": "- prev_dists[i];\t\n\t}\n\treturn old_tot_dist;\n}\n\nfloat total_distance(void){\n\tfloat tot_dist = 0.0;\n\t\n\tfor (int i=0;i #pragma omp parallel for reduction(+:tot_dist)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens2/Queens2.0.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "-;\t//backtrack to previous column\n\t\t}\n\t}\n\n\tprint_results(tries);\n\treturn 0;\n}\n\n\nvoid board_init(){\n\tfor(int i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens2/Queens2.0.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "irst column are valid\n\t}\n}\n\n\n//calculates valid rows of next column\nvoid calc_next_valid(int col){\n\tfor(int row=0;row #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens2/Queens2.1.c", "omp_pragma_line": "#pragma omp parallel for simd", "context_chars": 100, "text": "en & backtrack to previous column\n\t\t}\n\t}\n\n\tprint_results(tries);\n\treturn 0;\n}\n\n\nvoid board_init(){\n\tfor(int i=0;i #pragma omp parallel for simd"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. 
N queens problem/Queens2/Queens2.1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "positions are valid\n\t\t}\n\t}\n}\n\n\n//calculates valid rows of current column\nvoid calc_valid(int col){\n\tfor(int row=0;row #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.1.c", "omp_pragma_line": "#pragma omp parallel for private(gene1, gene2)", "context_chars": 100, "text": "TION_SIZE-1] = 1.0;\n\t\n\twhile(1){\n\t\tfound = fitness();\n\t\tif(found >= 0) break;\n\t\t\n\t\tgeneration++;\n\n\t\tfor(int pair=0;pair #pragma omp parallel for private(gene1, gene2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.1.c", "omp_pragma_line": "#pragma omp parallel for private(random_row)", "context_chars": 100, "text": "ray is used --> max(N) = 256 = max(queens)\nvoid population_init(void){\n\tunsigned char random_row;\n\n\tfor(int i=0;i #pragma omp parallel for private(random_row)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.1.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pulation after crossover and mutation\nvoid update_population(void){\n\t//population = new population\n\tfor(int i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.0.c", "omp_pragma_line": "#pragma omp parallel for private(gene1, gene2)", "context_chars": 100, "text": "TION_SIZE-1] = 1.0;\n\t\n\twhile(1){\n\t\tfound = fitness();\n\t\tif(found >= 0) break;\n\t\t\n\t\tgeneration++;\n\n\t\tfor(int pair=0;pair #pragma omp parallel for private(gene1, gene2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.0.c", "omp_pragma_line": "#pragma omp parallel for private(random_row)", "context_chars": 100, "text": "ray is used --> max(N) = 256 = max(queens)\nvoid population_init(void){\n\tunsigned char random_row;\n\n\tfor(int i=0;i #pragma omp parallel for private(random_row)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/5. N queens problem/Queens4/Queens4.0.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "pulation after crossover and mutation\nvoid update_population(void){\n\t//population = new population\n\tfor(int i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/4. Neural networks/NN4/NN4.2.c", "omp_pragma_line": "#pragma omp parallel for private(y)", "context_chars": 100, "text": "n temp/((1+temp)*(1+temp));\n}\n\nvoid activate_NN(double *vector_in) {\n\tdouble y;\n\n\t//internal layer\n\tfor(int i=0;i #pragma omp parallel for private(y)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/4. 
Neural networks/NN4/NN4.2.c", "omp_pragma_line": "#pragma omp parallel for private(acc)", "context_chars": 100, "text": "d_derivative(DL2[i]) * (desired[i] - OL2[i]); //equation III\n\t}\n\t\n\t//internal neurons\n\tdouble acc;\n\tfor(int i=0;i #pragma omp parallel for private(acc)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/4. Neural networks/NN4/NN4.2.c", "omp_pragma_line": "#pragma omp parallel for private(delta_i)", "context_chars": 100, "text": ";\t//equation II\n\t}\n}\n\nvoid update_weights(double *vector_in){\n\tdouble delta_i;\n\n\t//layer 1 weights\n\tfor(int i=0;i #pragma omp parallel for private(delta_i)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/n-roussos/Parallel-Programming-with-OpenMP/4. Neural networks/NN4/NN4.2.c", "omp_pragma_line": "#pragma omp parallel for private(max,predicted_class, OL1, OL2)", "context_chars": 100, "text": "hts(vector_in);\n}\n\ndouble train_accuracy(void){\n\tdouble max;\n\tint predicted_class, correct = 0;\n\n\t//for(int i=0;i max){\n\t\t\t\tmax = OL2[j];\n\t\t\t\tpredicted_class = j;\n\t\t\t}\n\t\t}\n\t\tif(predicted_class == TRAIN_CLASS[i]){\n\t\t\t//#pragma omp atomic update\n\t\t\tcorrect++;\n\t\t}\n\t} #pragma omp parallel for private(max,predicted_class, OL1, OL2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rahulsonone1234/Parallel-Programming/ContrastStretchingImageParallel.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " png_destroy_write_struct(&png, &info);\n\n}\n\nint main() \n{ \n double startTime = omp_get_wtime();\n for(int i=1;i<=800;i++)\n {\n \t char str[25]=\"cat (\";\n char out[25]=\"out (\";\n int a=i;\n int tmp=i;\n int cnt=0;\n while(tmp)\n {\n tmp=tmp/10;\n cnt++;\n }\n int j=cnt-1;\n char pok[25]=\").png\";\n char lok[25];\n while(a)\n {\n int k=a%10;\n lok[j]=(char)('0'+k);\n a=a/10;\n j--;\n }\n lok[cnt]='\\0';\n strcat(str,lok);\n strcat(str,pok);\n strcat(out,lok);\n strcat(out,pok);\n char* s=out;\n char* p=str;\n \n read_png_file(p,s);\n //fflush(stdin);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rahulsonone1234/Parallel-Programming/LoGParallel.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ng_destroy_write_struct(&png, &info);\n}\n\n \n\nint main() {\n double startTime = omp_get_wtime();\n for(int i=1;i<=4;i++)\n {\n char str[25]=\"cat (\";\n char out[25]=\"out (\";\n int a=i;\n int tmp=i;\n int cnt=0;\n while(tmp)\n {\n tmp=tmp/10;\n cnt++;\n }\n int j=cnt-1;\n char pok[25]=\").png\";\n char lok[25];\n while(a)\n {\n int k=a%10;\n lok[j]=(char)('0'+k);\n a=a/10;\n j--;\n }\n lok[cnt]='\\0';\n strcat(str,lok);\n strcat(str,pok);\n strcat(out,lok);\n strcat(out,pok);\n char* s=out;\n char* p=str;\n edgeDetection(p,s);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rahulsonone1234/Parallel-Programming/EdgeDetectionfinalparallel.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "ng_destroy_write_struct(&png, &info);\n}\n\n \n\nint main() {\n double startTime = omp_get_wtime();\n for(int i=1;i<=800;i++)\n {\n char str[25]=\"cat (\";\n char out[25]=\"out (\";\n int a=i;\n int tmp=i;\n int cnt=0;\n while(tmp)\n {\n tmp=tmp/10;\n cnt++;\n }\n int j=cnt-1;\n char pok[25]=\").png\";\n char lok[25];\n while(a)\n {\n int k=a%10;\n lok[j]=(char)('0'+k);\n a=a/10;\n j--;\n }\n 
lok[cnt]='\\0';\n strcat(str,lok);\n strcat(str,pok);\n strcat(out,lok);\n strcat(out,pok);\n char* s=out;\n char* p=str;\n edgeDetection(p,s);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/rahulsonone1234/Parallel-Programming/negationImageParallel.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "\n png_destroy_write_struct(&png, &info);\n\n\n}\n\n\nint main() \n{\n\tdouble startTime = omp_get_wtime();\n\tfor(int i=1;i<=4;i++)\n\t{\n\t\tchar str[25]=\"cat (\";\n\t\tchar out[25]=\"out (\";\n\t\tint a=i;\n\t\tint tmp=i;\n\t\tint cnt=0;\n\t\twhile(tmp)\n\t\t{\n\t\t\ttmp=tmp/10;\n\t\t\tcnt++;\n\t\t}\n\t\tint j=cnt-1;\n\t\tchar pok[25]=\").png\";\n\t\tchar lok[25];\n\t\twhile(a)\n\t\t{\n\t\t\tint k=a%10;\n\t\t\tlok[j]=(char)('0'+k);\n\t\t\ta=a/10;\n\t\t\tj--;\n\t\t}\n\t\tlok[cnt]='\\0';\n\t\tstrcat(str,lok);\n\t\tstrcat(str,pok);\n\t\tstrcat(out,lok);\n\t\tstrcat(out,pok);\n\t\tchar* s=out;\n\t\tchar* p=str;\n\t\tread_png_file(p,s);\n\t\t\n\t} #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mghojal/Edge-detection-using-laplacian-operator/header.h", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": " \n\tint x,y;\n\t// for each row, column, calculating the new value using Stencil Matrix (laplacian)\n\tfor ( y = 1; y < in->height-2; y++ ) {\n\t\tfor( x = 1; x < in->width-2; x++ ) {\n\t\t\tdouble val = abs(\n\t\t\t8 * image_get_pixeld(in, x, y) -(\n\t\t\timage_get_pixeld(in, x-1, y-1 ) +\n\t\t\timage_get_pixeld(in, x , y-1 ) +\n\t\t\timage_get_pixeld(in, x+1, y-1 ) +\n\t\t\timage_get_pixeld(in, x-1, y ) +\n\t\t\timage_get_pixeld(in, x+1, y ) +\n\t\t\timage_get_pixeld(in, x-1, y+1 ) +\n\t\t\timage_get_pixeld(in, x , y+1 ) +\n\t\t\timage_get_pixeld(in, x+1, y+1 )));\n\t\n\t\t\tbuffer[y*in->width+x] = val;\n\t\t\t//#pragma omp critical\n\t\t\t{\n\t\t\t\tif ( val > max ) max = val;\n\t\t\t\tif ( val < min ) min = val;\n\t\t\t}\t\t\n\t\t}\n\t} #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mghojal/Edge-detection-using-laplacian-operator/header.h", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "n ) min = val;\n\t\t\t}\t\t\n\t\t}\n\t}\n\n\t//Normailzing the values and set pixel value in the proper location\n\tfor ( y = 0; y < in->height; y++ ) {\n\t\tfor( x = 0; x < in->width; x++ ) {\n\t\t\tdouble val = MAX_BRIGHTNESS * (buffer[y*in->width+x] - min) / (max-min);\n\t\t\timage_set_pixel( out, x, y, val );\n\t\t}\n\t} #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mghojal/Edge-detection-using-laplacian-operator/header.h", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "fo_ptr);\n\twidth = png_get_image_width(png_ptr, info_ptr);\n\timage = image_create( width, height );\n\n\tfor ( y = 0; y < height; y++ ) {\n\t\tfor( x = 0; x < width; x++ ) {\n\t\t\tunsigned c = 0;\n\t\t\tunsigned char* ch = (unsigned char*)&c;\n\t\t\tunsigned char* array = row_pointers[y];\n\n\t\t\tch[0] = array[x];\n\t\t\timage_set_pixel(image, x, y, c);\n\t\t}\n\t} #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/nathanlo99/nart/src/raytracer.cpp", "omp_pragma_line": "#pragma omp parallel for schedule(guided) collapse(2)", "context_chars": 100, "text": "_pixels = screen_height * 
screen_width;\n size_t num_processed = 0;\n auto last_ms = milli_time();\n\nfor (size_t y = 0; y < screen_height; y++) {\n for (size_t x = 0; x < screen_width; x++) {\n vec3 result_colour = vec3();\n for (size_t i = 0; i < aa_num * aa_num; i++) {\n const float rx = (i / aa_num) / aa_num, ry = (i % aa_num) / aa_num;\n const vec3 dir = forward //\n + (x - (screen_width / 2.0f) + rx) * dx //\n + (y - (screen_height / 2.0f) + ry) * dy;\n const Ray ray{camera_position, glm::normalize(dir)};\n const vec3 c = scene.intersect(ray, max_depth);\n result_colour += c;\n }\n result.set(x, y, result_colour / (float)(aa_num * aa_num));\n\n#pragma omp critical\n {\n num_processed++;\n const auto cur_ms = milli_time();\n if (cur_ms - last_ms > 1000) {\n last_ms = cur_ms;\n const float percent = num_processed * 100. / num_pixels;\n INFO(\"Completed \" + std::to_string(percent) + \"% of \" + render_name);\n result.write(ImageFormat::BMP, \"tmp/progress.bmp\");\n }\n }\n }\n } #pragma omp parallel for schedule(guided) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/oldExamples/appFor_vSched-omp.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")malloc(sizeof(float)*probSize);\n\t\n // initialize input vectors, use standard worksharing here. \n for (int i = 0 ; i < probSize ; i++)\n {\n a[i] = i*1.0;\n b[i] = 1.0;\n#ifdef VERBOSE\n int myTid = omp_get_thread_num();\n printf(\"tid in init = %d\", myTid);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/oldExamples/app2For_vSched-omp.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")malloc(sizeof(float)*probSize);\n\t\n // initialize input vectors, use standard worksharing here. \n for (int i = 0 ; i < probSize ; i++)\n {\n a[i] = i*1.0;\n b[i] = 1.0;\n#ifdef VERBOSE\n int myTid = omp_get_thread_num();\n printf(\"tid in init = %d\", myTid);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/oldExamples/appFor-OpenMP-vSched.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")malloc(sizeof(float)*probSize);\n\t\n // initialize input vectors, use standard worksharing here. \n for (int i = 0 ; i < probSize ; i++)\n {\n a[i] = i*1.0;\n b[i] = 1.0;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/examples/appFor_omp-lols.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")malloc(sizeof(float)*probSize);\n\t\n // initialize input vectors, use standard worksharing here. \n for (int i = 0 ; i < probSize ; i++)\n {\n a[i] = i*1.0;\n b[i] = 1.0;\n#ifdef VERBOSE\n int myTid = omp_get_thread_num();\n printf(\"tid in init = %d\", myTid);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/tests/perf/testOneFor_omp-lols.C", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": ")malloc(sizeof(float)*probSize);\n\t\n // initialize input vectors, use standard worksharing here. 
\n for (int i = 0 ; i < probSize ; i++)\n {\n a[i] = i*1.0;\n b[i] = 1.0;\n#ifdef VERBOSE\n int myTid = omp_get_thread_num();\n printf(\"tid in init = %d\", myTid);\n\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/mandelbrot-hybrid.cpp", "omp_pragma_line": "#pragma omp parallel for private(i, j)", "context_chars": 100, "text": " me start of %d\\n\",iproc,myRecvArr[0]);\n\t\t\t\tmyJobStart = myRecvArr[0];\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t// do work\n\t\t\t\tfor (j = 0; j < BLOCK_HEIGHT; j++) {\n\t\t\t\t\tfor (i = 0; i < BLOCK_WIDTH; i++) {\n\t\t\t\t\t\tpixels[i + j * BLOCK_WIDTH] = computePoint(i, j + myJobStart / 1536);\n\t\t\t\t\t\t//fprintf(stderr,\"%d \",pixels[i + j * BLOCK_WIDTH]);\n\t\t\t\t\t}\n\t\t\t\t} #pragma omp parallel for private(i, j)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/heat-mpi.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "need ghost values of X but we'll throw them in\n// as X[0] and X[N+1].\n//\n x = new double[n+2];\n\n for ( i = 0; i <= n + 1; i++ )\n {\n x[i] = ( ( double ) ( id * n + i - 1 ) * x_max\n + ( double ) ( p * n - id * n - i ) * x_min )\n / ( double ) ( p * n - 1 );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/heat-mpi.cpp", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "LD, &status );\n #endif\n }\n//\n// Update the temperature based on the four point stencil.\n//\nfor ( i = 1; i <= n; i++ )\n {\n h_new[i] = h[i] \n + ( time_delta * k / x_delta / x_delta ) * ( h[i-1] - 2.0 * h[i] + h[i+1] ) \n + time_delta * rhs ( x[i], time );\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/matVec-mpi.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "alloc ( n * sizeof ( double ) );\n b = ( double * ) malloc ( m * sizeof ( double ) );\n\n k = 0;\nfor ( i = 1; i <= m; i++ ) \n {\n for ( j = 1; j <= n; j++ )\n {\n a[k] = sqrt ( 2.0 / ( double ) ( n + 1 ) ) \n * sin ( ( double ) ( i * j ) * pi / ( double ) ( n + 1 ) );\n k = k + 1;\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/matVec-mpi.c", "omp_pragma_line": "#pragma omp parallel for num_threads(16)", "context_chars": 100, "text": "o, except that entry J_ONE will be 1.\n Pick any value of J_ONE between 1 and M.\n*/\n j_one = 17;\nfor ( i = 0; i < n; i++ )\n {\n x[i] = sqrt ( 2.0 / ( double ) ( n + 1 ) ) \n * sin ( ( double ) ( ( i + 1 ) * j_one ) * pi / ( double ) ( n + 1 ) );\n printf(\"thread %d doing iteration %d \\n\", omp_get_thread_num(), i);\n } #pragma omp parallel for num_threads(16)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vlkale/lw-sched/share/ukernels/matVec-mpi.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": " printf ( \" Process %d shutting down.\\n\", my_id );\n break;\n }\n\n \n ans = 0.0;\nfor ( i = 0; i < n; i++ )\n {\n ans = ans + a_row[i] * x[i];\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/garciparedes/cc-examples/parallel/openmp/uva/proof3.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(a) lastprivate(b)", "context_chars": 100, "text": 
"h>\n#include\n\nint main() {\n\n\tomp_set_num_threads(4);\n int i;\n int a = 0;\n int b = 5;\n for (i=0; i<4; i++) {\n b = a + 1;\n\t a = b;\n } #pragma omp parallel for firstprivate(a) lastprivate(b)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/garciparedes/cc-examples/parallel/openmp/uva/proof2.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "lib.h>\n#include\n\nint main() {\n\n\tomp_set_num_threads(4);\n int i, v[4];\n int a = 2;\n\n for (i=0; i<4; i++) v[i] = i;\n\n #pragma omp parallel for\n for (i=0; i<4; i++) {\n v[i] = a + v[i];\n a = a + 1;\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/garciparedes/cc-examples/parallel/openmp/uva/proof2.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " int i, v[4];\n int a = 2;\n\n #pragma omp parallel for \n for (i=0; i<4; i++) v[i] = i;\n\n for (i=0; i<4; i++) {\n v[i] = a + v[i];\n a = a + 1;\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " not ensured to be contiguous\n\trows = (double*) malloc(sizeof(double)*subprob_size*subprob_size);\n\n\tfor (i=0;i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for collapse(2)", "context_chars": 100, "text": "n_num, int row_num) {\n\tint i, j;\n\n\t// Initialize matrix\n\t// First time all values are INITIAL_GRID\n\tfor(i=0; i #pragma omp parallel for collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "in the first column\n\tif (column_num == 0){\n\t\t// I'm in the first row column\n\t\tif (row_num == 0){\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i] = BC_HOT;\n\t\t\t}\n\t\t}\n\t\t// I'm in the last row\n\t\telse if(row_num == ((int) sqrt(n_subprobs))-1){\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "lse if(column_num == ((int) sqrt(n_subprobs))-1){\n\t\t// I'm in the first row\n\t\tif (row_num == 0){\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "i] = BC_HOT;\n\t\t\t}\n\t\t}\n\t\t// I'm in the last row\n\t\telse if(row_num == ((int) sqrt(n_subprobs))-1){\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "f (column_num != ((int) sqrt(n_subprobs))-1){\n\t\t\t// send the last column of my subproblem matrix\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", 
"omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " in the first column\n\t\tif (column_num != 0){\n\t\t\t// send the first column of my subproblem matrix\n\t\t\tfor(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxdiff) collapse(2)", "context_chars": 100, "text": " EL = 0.2*( EL + UP + DOWN + LEFT + RIGHT );\n\t\t// Inner rows i=[1...subprob_size-2]\n\t\tfor(i=1;i maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t\t}\n\t\t} #pragma omp parallel for reduction(max:maxdiff) collapse(2)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxdiff)", "context_chars": 100, "text": "if (fabs(b[i][j]-a[i][j]) > maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t// j=[1...subprob_size-2]\n\t\tfor(j=1;j maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t} #pragma omp parallel for reduction(max:maxdiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxdiff)", "context_chars": 100, "text": "][j]-a[i][j]) > maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\n\t\t// Inner rows i=[1...subprob_size-2]\n\t\tfor(i=1;i maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t} #pragma omp parallel for reduction(max:maxdiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxdiff)", "context_chars": 100, "text": "ff[i]+a[i][j+1]);\n\t\t\tif (fabs(b[i][j]-a[i][j]) > maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t}\n\t\t\n\t\tfor(i=1;i maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t} #pragma omp parallel for reduction(max:maxdiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mcanalesmayo/jacobi-mpi/jacobi_par.c", "omp_pragma_line": "#pragma omp parallel for reduction(max:maxdiff)", "context_chars": 100, "text": "if (fabs(b[i][j]-a[i][j]) > maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t// j=[1...subprob_size-2]\n\t\tfor(j=1;j maxdiff) maxdiff = fabs(b[i][j]-a[i][j]);\n\t\t} #pragma omp parallel for reduction(max:maxdiff)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhishekgupta-1/Parallel-Computing-Assignment/submission/tsp_openmp/tsp_openmp.c", "omp_pragma_line": "#pragma omp parallel for default(none) \\", "context_chars": 100, "text": ");\n final_res = curr_res;\n }\n }\n return;\n }\n \n int i;\nfirstprivate(curr_bound, curr_weight, level)\\\n shared(curr_pat, vis,adj,final_res,N)\n for (i=0; i #pragma omp parallel for default(none) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abhishekgupta-1/Parallel-Computing-Assignment/submission/tsp_openmp/tsp_openmp.c", "omp_pragma_line": "#pragma omp parallel for reduction(+:curr_bound)", "context_chars": 100, "text": "\n memset(curr_path, -1,sizeof(curr_path));\n memset(visited, 0, sizeof(visited));\n \n int i;\nfor (i=0; i #pragma omp parallel for reduction(+:curr_bound)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC301-Algoritmos_Paralelos/Clase_7/Ejercicio_3/minMaxVector_openmp.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(static,chunksize)", "context_chars": 100, "text": 
"%d\\n\",*(minMax_array+1));\n return 0;\n}\n\nvoid\nllenar(int *a)\n{\n int i;\n omp_set_num_threads(4);\n for(i=0;imax_val)\n max_val=a[i];\n if(min_val>a[i])\n min_val=a[i];\n }\n static int b[2];\n b[0]=min_val;\n b[1]=max_val;\n return b;\n} #pragma omp parallel for ordered schedule(static,chunksize)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC301-Algoritmos_Paralelos/Clase_7/Ejercicio_3/minMaxVector_openmp.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(static,chunksize) \\", "context_chars": 100, "text": ")=rand()%50;\n}\n\nint*\nminMax(int*a)\n{\n int i,min_val=a[0],max_val=a[0];\n omp_set_num_threads(4);\n reduction(max:max_val) reduction(min:min_val)\n for(i=1;imax_val)\n max_val=a[i];\n if(min_val>a[i])\n min_val=a[i];\n } #pragma omp parallel for ordered schedule(static,chunksize) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC301-Algoritmos_Paralelos/Clase_7/Ejercicio_2/ordered_openmp.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(static,chunksize)", "context_chars": 100, "text": "alelo\\nEl vector a es:\\n\");\n for(i=0;ifor(i=0;i #pragma omp parallel for ordered schedule(static,chunksize)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC301-Algoritmos_Paralelos/Clase_10/Ejercicio_1/bitonicSort_openmp.c", "omp_pragma_line": "#pragma omp parallel for private(start) shared(sequence, end, up, distance)", "context_chars": 100, "text": "p, int * sequence, int size)\n{\n int distance = size/2;\n int * start, * end = sequence+distance;\n for(start = sequence; start < end; start++)\n if( (*start > *(start+distance)) == up)\n swap(start, start+distance);\n}\n\nvoid swap(int * x, int * y)\n{\n int temp = *x;\n *x = *y;\n *y = temp;\n} #pragma omp parallel for private(start) shared(sequence, end, up, distance)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC301-Algoritmos_Paralelos/Clase_10/Ejercicio_1/generateBitonicArray.c", "omp_pragma_line": "#pragma omp parallel for ordered schedule(static,5)", "context_chars": 100, "text": " // 2^14 = 16384 maximo\n omp_set_num_threads(4);\n fprintf(fp,\"%li\\n\",n);\n for(i=0;i #pragma omp parallel for ordered schedule(static,5)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC462-Sistemas_Concurrentes_y_Distribuidos/Labo_1/example/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "trix(quantum_matrix m) \n{\n int i, j, z=0;\n /* int l; */\n\n while ((1 << z++) < m.rows);\n z--;\n\n {\n /* for (l=z-1; l>=0; l--) \n\t{\n\t if ((l % 4 == 3))\n\t printf(\" \");\n\t printf(\"%i\", (i >> l) & 1);\n\t } */\n #pragma omp parallel for\n for(j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC462-Sistemas_Concurrentes_y_Distribuidos/Labo_1/example/matrix.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " l>=0; l--) \n\t{\n\t if ((l % 4 == 3))\n\t printf(\" \");\n\t printf(\"%i\", (i >> l) & 1);\n\t } */\n for(j=0; j #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/fmorenovr/ComputerScience_UNI/CC462-Sistemas_Concurrentes_y_Distribuidos/Labo_1/example/matrix.c", "omp_pragma_line": "#pragma omp parallel for", 
"context_chars": 100, "text": "f(A.cols != B.rows)\n quantum_error(QUANTUM_EMSIZE);\n C = quantum_new_matrix(B.cols, A.rows);\n\n for(i=0; i #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/slopes.C", "omp_pragma_line": "#pragma omp parallel for private(currentPtr,Curr_El)", "context_chars": 100, "text": "--------\n //-------------------and --------------------------\n \n /* mdj 2007-02 */\n //for(ti_ndx_t ndx = 0; ndx < El_Table->size(); ndx++)\n {\n if(El_Table->adapted_[ndx] > 0)//if this element does not belong on this processor don't involve!!!\n {\n El_Table->elenode_[ndx].get_slopes(El_Table, NodeTable, matprops_ptr->gamma);\n }\n } #pragma omp parallel for private(currentPtr,Curr_El)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": "\n double VxVy[2];\n double dt2 = .5 * dt; // dt2 is set as dt/2 !\n\n Element* Curr_El;\n ////private(currentPtr,Curr_El,IF_STOPPED,influx,j,k,curr_time,flux_src_coef,VxVy)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n Curr_El = &(elements_[ndx]);\n elements_[ndx].update_prev_state_vars();\n\n influx[0] = Influx_[0][ndx];\n influx[1] = Influx_[1][ndx];\n influx[2] = Influx_[2][ndx];\n\n //note, now there is no check for fluxes from non-local elements\n if(!(influx[0] >= 0.0))\n {\n printf(\"negative influx=%g\\n\", influx[0]);\n assert(0);\n }\n\n // -- calc contribution of flux source\n curr_time = (timeprops_ptr->cur_time) * (timeprops_ptr->TIME_SCALE);\n\n\n //VxVy[2];\n if(h[ndx] > GEOFLOW_TINY)\n {\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n }\n else\n {\n VxVy[0] = VxVy[1] = 0.0;\n }\n\n#ifdef STOPCRIT_CHANGE_SOURCE\n IF_STOPPED=Curr_El->stoppedflags();\n#else\n IF_STOPPED = !(!(Curr_El->stoppedflags()));\n\n double g[3]{gravity_[0][ndx],gravity_[1][ndx],gravity_[2][ndx]};\n double d_g[3]{d_gravity_[0][ndx],d_gravity_[1][ndx],d_gravity_[2][ndx]};\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //predictor itself\n\n //NOTE: d(g[2]*Elm->state_vars(0))/dx is approximated by g[2]*dUvec[0]/dx !!!\n double c_sq;\n double h_inv;\n double tanbed;\n double VxVyS[2];\n double unitvx, unitvy;\n double tmp, sgn_dudy,sgn_dvdx;\n double forcegrav;\n double forceintx,forceinty, forcebedx,forcebedy;\n double forcebedequil, forcebedmax;\n double speed;\n // curv := inverse of radius of curvature = second derivative of\n // position normal to tangent with respect to distance along tangent,\n // if dz/dx=0 curve=d2z/dx2, otherwise rotate coordinate system so\n // dz/dx=0, that is mathematical definition of curvature I believe\n // laercio returns d2z/dx2 whether or not dz/dx=0 in his GIS functions\n\n if (IF_STOPPED == 2) {\n VxVy[0] = 0.0;\n VxVy[1] = 0.0;\n VxVyS[0] = 0.0;\n VxVyS[1] = 0.0;\n }\n else {\n //VxVy[0] = VxVy[0];\n //Elm->state_vars(1)/Elm->state_vars(0);\n //VxVy[1] = VxVy[1];\n //Elm->state_vars(2)/Elm->state_vars(0);\n VxVyS[0] = VxVy[0];\n VxVyS[1] = VxVy[1];\n }\n\n\n c_sq = kactxy_[0][ndx] * g[2] * h[ndx];\n //h_inv := 1/h[ndx];\n\n h[ndx]=h[ndx] - dt2 * (dhVx_dx[ndx] + dhVy_dy[ndx] + influx[0]);\n h[ndx]=c_dmax1(h[ndx], 0.0);\n\n //dF/dU, dG/dU and S terms if h[ndx] > TINY !\n if (h[ndx] > tiny) {\n h_inv = 1.0 / 
h[ndx];\n tanbed = tan(matprops_ptr->bedfrict[material_[ndx]]);\n\n //here speed is speed squared\n speed = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n if (speed > 0.0) {\n //here speed is speed\n speed = sqrt(speed);\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n\n //dnorm=dsqrt(Uprev[1]**2+Uprev[2]**2+tiny**2)\n\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n //****** X-dir ******\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // dF/dU and dG/dU terms\n hVx[ndx]=hVx[ndx] -\n dt2 * ((c_sq - VxVy[0] * VxVy[0]) * dh_dx[ndx] +\n 2.0 * VxVy[0] * dhVx_dx[ndx] -\n VxVy[0] * VxVy[1] * dh_dy[ndx] +\n VxVy[1] * dhVx_dy[ndx] +\n VxVy[0] * dhVy_dy[ndx] +\n influx[1]);\n\n // x direction source terms\n\n // the gravity force in the x direction\n forcegrav = g[0] * h[ndx];\n\n // the internal friction force\n tmp = h_inv * (dhVx_dy[ndx] - VxVyS[0] * dh_dy[ndx]);\n sgn_dudy = sgn_tiny(tmp, frict_tiny);\n forceintx = sgn_dudy * h[ndx] * kactxy_[0][ndx] * (g[2] * dh_dy[ndx] + d_g[1] * h[ndx]) * sin(int_frict);\n\n // the bed friction force for fast moving flow\n forcebedx = unitvx * c_dmax1(g[2] * h[ndx] + VxVyS[0] * hVx[ndx] * curvature_[0][ndx], 0.0) * tanbed;\n\n if (IF_STOPPED == 2 && 1 == 0) {\n //the bed friction force for stopped or nearly stopped flow\n\n //the static friction force is LESS THAN or equal to the friction\n //coefficient times the normal force but it can NEVER exceed the\n //NET force it is opposing\n\n //maximum friction force the bed friction can support\n forcebedmax = c_dmax1(g[2] * h[ndx] + VxVyS[0] * hVx[ndx] * curvature_[0][ndx], 0.0) * tanbed;\n\n // the NET force the bed friction force is opposing\n forcebedequil = forcegrav - forceintx;\n // -kactxy*g[2]*Elm->state_vars(0)*dh_dx\n\n\n // the \"correct\" stopped or nearly stopped flow bed friction force\n // (this force is not entirely \"correct\" it will leave a \"negligible\"\n // (determined by stopping criteria) amount of momentum in the cell\n forcebedx = sgn_tiny(forcebedequil, c_dmin1(forcebedmax,\n dabs(forcebedx) + dabs(forcebedequil)));\n // forcebedx=\n // sgn(forcebed2,dmin1(forcebed1,dabs(forcebed2)))\n // else\n }\n\n // all the x source terms\n hVx[ndx]=hVx[ndx] + dt2 * (forcegrav - forcebedx - forceintx);\n // write(*,*) 'int', forceintx, 'bed', forcebedx\n\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n //****** Y-dir ******\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n //dF/dU and dG/dU terms\n hVy[ndx]=hVy[ndx] -\n dt2 * ((c_sq - VxVy[1] * VxVy[1]) * dh_dy[ndx] +\n 2.0 * VxVy[1] * dhVy_dy[ndx] -\n VxVy[0] * VxVy[1] * dh_dx[ndx] +\n VxVy[1] * dhVx_dx[ndx] +\n VxVy[0] * dhVy_dx[ndx] +\n influx[2]);\n\n //the gravity force in the y direction\n forcegrav = g[1] * h[ndx];\n\n //the internal friction force\n tmp = h_inv * (dhVy_dx[ndx] - VxVyS[1] * dh_dx[ndx]);\n sgn_dvdx = sgn_tiny(tmp, frict_tiny);\n forceinty = sgn_dvdx * h[ndx] * kactxy_[0][ndx] * (g[2] * dh_dx[ndx] + d_g[0] * h[ndx]) * sin(int_frict);\n\n //the bed friction force for fast moving flow\n forcebedy = unitvy *\n c_dmax1(g[2] * h[ndx] + VxVyS[1] * hVy[ndx] * curvature_[1][ndx], 0.0)\n * tanbed;\n\n if (IF_STOPPED == 2 && 1 == 0) {\n //the bed friction force for stopped or nearly stopped flow\n\n forcebedmax =\n c_dmax1(g[2] * h[ndx] + VxVyS[1] * hVy[ndx] * curvature_[1][ndx], 0.0)\n * tanbed;\n\n //the NET force the bed friction force is opposing\n forcebedequil = forcegrav\n // $ 
-kactxy*g[2]*Elm->state_vars(0)*dh_dy\n - forceinty;\n\n //the \"correct\" stopped or nearly stopped flow bed friction force\n //(this force is not entirely \"correct\" it will leave a \"negligible\"\n //(determined by stopping criteria) amount of momentum in the cell\n forcebedy = sgn_tiny(forcebedequil, c_dmin1(forcebedmax,\n fabs(forcebedy) + fabs(forcebedequil)));\n\n // forcebedy=sgn(forcebed2,dmin1(forcebed1,dabs(forcebed2)))\n // else\n }\n\n // all the y source terms\n hVy[ndx]=hVy[ndx] + dt2 * (forcegrav - forcebedy - forceinty);\n\n }\n\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n // apply bc's\n for(int j = 0; j < 4; j++)\n if(Curr_El->neigh_proc(j) == INIT) // this is a boundary!\n for(int k = 0; k < NUM_STATE_VARS; k++)\n state_vars_[k][ndx]=0.0;\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\", "context_chars": 100, "text": "ISPC_cor);\n //ANNOTATE_TASK_BEGIN(Integrator_SinglePhase_Coulomb_FirstOrder_corrector_loop);\n reduction(+: m_forceint, m_forcebed, m_eroded, m_deposited, m_realvolume)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n //ANNOTATE_ITERATION_TASK(ISPC_cor_iter);\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n //if first order states was not updated as there is no predictor\n if(order==1)\n {\n for (int i = 0; i < NUM_STATE_VARS; i++)\n prev_state_vars_[i][ndx]=state_vars_[i][ndx];\n }\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double elem_forceint;\n double elem_forcebed;\n double elem_eroded;\n double elem_deposited;\n\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar, j, k;\n\n double fluxxp[MAX_NUM_STATE_VARS], fluxyp[MAX_NUM_STATE_VARS];\n double fluxxm[MAX_NUM_STATE_VARS], fluxym[MAX_NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n /* the values being passed to correct are for a SINGLE element, NOT a\n region, as such the only change that having variable bedfriction\n requires is to pass the bedfriction angle for the current element\n rather than the only bedfriction\n I wonder if this is legacy code, it seems odd that it is only called\n for the SUN Operating System zee ../geoflow/correct.f */\n\n#ifdef STOPPED_FLOWS\n #ifdef STOPCRIT_CHANGE_SOURCE\n int IF_STOPPED=stoppedflags_[ndx];\n #else\n int IF_STOPPED = !(!stoppedflags_[ndx]);\n \n\n\n\n double VxVy[2];\n if(h[ndx] > tiny)\n {\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n }\n else\n {\n VxVy[0] = VxVy[1] = 0.0;\n }\n\n elements_[ndx].convect_dryline(VxVy[0], VxVy[1], dt); //this is 
necessary\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //corrector itself\n\n double speed;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcebedmax, forcebedequil, forcegravx , forcegravy;\n double unitvx, unitvy;\n double tanbed;\n double Ustore[3];\n\n double h_inv;\n double sgn_dudy, sgn_dvdx, tmp;\n double es, totalShear;\n\n Ustore[0] = prev_state_vars_[0][ndx]\n - dtdx * (fluxxp[0] - fluxxm[0])\n - dtdy * (fluxyp[0] - fluxym[0])\n + dt * Influx_[0][ndx];\n Ustore[0] = c_dmax1(Ustore[0], 0.0);\n\n Ustore[1] = prev_state_vars_[1][ndx]\n - dtdx * (fluxxp[1] - fluxxm[1])\n - dtdy * (fluxyp[1] - fluxym[1])\n + dt * Influx_[1][ndx];\n\n Ustore[2] = prev_state_vars_[2][ndx]\n - dtdx * (fluxxp[2] - fluxxm[2])\n - dtdy * (fluxyp[2] - fluxym[2])\n + dt * Influx_[2][ndx];\n\n // initialize to zero\n forceintx = 0.0;\n forcebedx = 0.0;\n forcebedx_curv = 0.0;\n forcebedy_curv = 0.0;\n forceinty = 0.0;\n forcebedy = 0.0;\n unitvx = 0.0;\n unitvy = 0.0;\n elem_eroded = 0.0;\n\n if(h[ndx] > tiny)\n {\n double inertial_x,inertial_y,drag_x, drag_y;\n // S terms\n // here speed is speed squared\n speed = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n if (speed > 0.0)\n {\n // here speed is speed\n speed = sqrt(speed);\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n tanbed = tan(bedfrictang[ndx]);\n h_inv = 1.0 / h[ndx];\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // x direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // the gravity force in the x direction\n forcegravx = g[0][ndx] * h[ndx];\n\n // the internal friction force\n tmp = h_inv * (dhVx_dy[ndx] - VxVy[0] * dh_dy[ndx]);\n sgn_dudy = sgn_tiny(tmp, frict_tiny);\n forceintx = sgn_dudy * h[ndx]* kactxy[ndx] * (g[2][ndx] * dh_dy[ndx] + dgdx[1][ndx] * h[ndx]) * sin_intfrictang;\n\n // the bed friction force for fast moving flow\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedx = unitvx * tanbed * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * tanbed * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n#ifdef STOPPED_FLOWS\n if (IF_STOPPED == 2 && 1 == 0) {\n // the bed friction force for stopped or nearly stopped flow\n\n // the static friction force is LESS THAN or equal to the friction\n // coefficient times the normal force but it can NEVER exceed the\n // NET force it is opposing\n\n // maximum friction force the bed friction can support\n forcebedmax = g[2][ndx] * h[ndx] * tanbed;\n\n // the NET force the bed friction force is opposing\n forcebedequil = forcegrav - forceintx;\n // $ -kactxy*g[2]*EmTemp->state_vars(0)*dh_dx\n\n // the \"correct\" stopped or nearly stopped flow bed friction force\n // (this force is not entirely \"correct\" it will leave a \"negligible\"\n // (determined by stopping criteria) amount of momentum in the cell\n forcebedx = sgn_tiny(forcebedequil, c_dmin1(forcebedmax, fabs(forcebedx) + fabs(forcebedequil)));\n // forcebedx=sgn_tiny(forcebed2,dmin1(forcebed1,fabs(forcebed2)))\n\n // not really 1 but this makes friction statistics accurate\n unitvx = 1.0;\n // else\n\n }\n\n\n tmp = Ustore[1] + dt * (forcegravx - forcebedx - forcebedx_curv - forceintx);\n //STOPPING CRITERIA\n if(stopping_criteria==1)\n {\n inertial_x = fabs(Ustore[1] + dt * forcegravx);\n drag_x = fabs(dt * (forcebedx 
+ forcebedx_curv + forceintx) );\n\n if (inertial_x <= drag_x)\n tmp = 0.0;\n }\n Ustore[1] = tmp;\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // y direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // the gravity force in the y direction\n forcegravy = g[1][ndx] * h[ndx];\n\n // the internal friction force\n tmp = h_inv * (dhVy_dx[ndx] - VxVy[1] * dh_dx[ndx]);\n sgn_dvdx = sgn_tiny(tmp, frict_tiny);\n forceinty = sgn_dvdx * h[ndx] * kactxy[ndx] * (g[2][ndx] * dh_dx[ndx] + dgdx[0][ndx] * h[ndx]) * sin_intfrictang;\n\n // the bed friction force for fast moving flow\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedy = unitvy * tanbed * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * tanbed * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n#ifdef STOPPED_FLOWS\n if (IF_STOPPED == 2 && 1 == 0) {\n // the bed friction force for stopped or nearly stopped flow\n\n // the NET force the bed friction force is opposing\n forcebedequil = forcegrav - forceinty;\n // $ -kactxy*g[2]*EmTemp->state_vars(0)*dh_dy\n\n // the \"correct\" stopped or nearly stopped flow bed friction force\n // (this force is not entirely \"correct\" it will leave a \"negligible\"\n // (determined by stopping criteria) amount of momentum in the cell\n forcebedy = sgn_tiny(forcebedequil, c_dmin1(forcebedmax, fabs(forcebedy) + fabs(forcebedequil)));\n\n // not really 1 but this makes friction statistics accurate\n unitvy = 1.0;\n // else\n }\n\n tmp = Ustore[2] + dt * (forcegravy - forcebedy - forcebedy_curv - forceinty);\n //STOPPING CRITERIA\n if(stopping_criteria==1)\n {\n inertial_y = fabs(Ustore[2] + dt * forcegravy);\n drag_y = fabs(dt * (forcebedy + forcebedy_curv + forceinty) );\n\n if (inertial_y <= drag_y)\n tmp = 0.0;\n }\n Ustore[2] = tmp;\n\n\n#ifdef STOPPED_FLOWS\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // (erosion terms) this is Camil's logic, Keith changed some variable\n //names for clarity\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n if ((false) && (do_erosion != 0) && (IF_STOPPED == 0)) {\n totalShear = sqrt(forcebedx * forcebedx + forcebedy * forcebedy);\n if ((totalShear > threshold) && (h[ndx] > 0.004)) {\n\n es = erosion_rate * sqrt(fabs(totalShear - threshold));\n elem_eroded = dt*es;\n Ustore[0] = Ustore[0] + elem_eroded;\n Ustore[1] = Ustore[1] + elem_eroded * VxVy[0];\n Ustore[2] = Ustore[2] + elem_eroded * VxVy[1];\n //write (*,*) 'Doing Keith Erosion Model'\n }\n }\n\n if ((do_erosion != 0) && (h[ndx] > threshold)) {\n es = erosion_rate * sqrt(hVx[ndx] * hVx[ndx] + hVy[ndx] * hVy[ndx]) / h[ndx];\n Ustore[0] = Ustore[0] + dt * es;\n Ustore[1] = Ustore[1] + dt * es * Ustore[1];\n Ustore[2] = Ustore[2] + dt * es * Ustore[2];\n //write (*,*) 'Doing Camil Erosion Model'\n }\n\n }\n\n\n // computation of magnitude of friction forces for statistics\n elem_forceint = unitvx * forceintx + unitvy*forceinty;\n elem_forcebed = unitvx * forcebedx + unitvy*forcebedy;\n\n // update the state variables\n h[ndx]=Ustore[0];\n hVx[ndx]=Ustore[1];\n hVy[ndx]=Ustore[2];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n elem_forceint *= dxdy;\n elem_forcebed *= dxdy;\n elem_eroded *= dxdy;\n\n\n if(stoppedflags_[ndx] == 2)\n elem_deposited = h[ndx] * dxdy;\n else\n elem_deposited = 0.0;\n\n if(stoppedflags_[ndx])\n elem_eroded = 0.0;\n\n 
elements_[ndx].calc_shortspeed(1.0 / dt);\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\t\tm_forceint += fabs(elem_forceint);\n\t\tm_forcebed += fabs(elem_forcebed);\n\t\tm_realvolume += dxdy * h[ndx];\n\t\tm_eroded += elem_eroded;\n\t\tm_deposited += elem_deposited;\n\n\t\t// apply bc's\n\t\tfor (int j = 0; j < 4; j++)\n\t\t\tif (neigh_proc_[j][ndx] == INIT) // this is a boundary!\n\t\t\t\tfor (int k = 0; k < NUM_STATE_VARS; k++)\n\t\t\t\t\tstate_vars_[k][ndx] = 0.0;\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK)", "context_chars": 100, "text": "cosphiSQ*=cosphiSQ;\n\n // Updating K_act/pass based on updated state vars and their derivatives.\n\tfor(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n\t{\n\t\tif(adapted_[ndx] <= 0)\n\t\t{\n\t\t\tdouble vel;\n\t\t\tdouble Kactx, Kacty;\n\n\t if(h[ndx] > tiny)\n\t {\n\t \tdouble hSQ = h[ndx] * h[ndx];\n\t double tanbed = tan(bedfrictang[ndx]);\n\t double tandelSQ = tanbed * tanbed;\n\n\t vel=dhVx_dx[ndx]/h[ndx] - hVx[ndx]*dh_dx[ndx]/hSQ+\n\t dhVy_dy[ndx]/h[ndx] - hVy[ndx]*dh_dy[ndx]/hSQ;\n\t Kactx=(2.0/cosphiSQ)*(1.0-sgn_tiny(vel,tiny)*\n\t sqrt(fabs(1.0-(1.0+tandelSQ)*cosphiSQ) )) -1.0;\n\t Kacty=(2.0/cosphiSQ)*(1.0-sgn_tiny(vel,tiny)*\n\t sqrt(fabs(1.0-(1.0+tandelSQ)*cosphiSQ) )) -1.0;\n\n\t //if there is no yielding...\n\t if(fabs(hVx[ndx]/h[ndx]) < tiny && fabs(hVy[ndx]/h[ndx]) < tiny)\n\t {\n\t Kactx = 1.0;\n\t Kacty = 1.0;\n\t }\n\t }\n\t else\n\t {\n\t \tvel = 0.0;\n\t \tKactx = 1.0;\n\t \tKacty = 1.0;\n\t }\n\n\t effect_kactxy_[0][ndx] = Kactx * scale_.epsilon;\n\t effect_kactxy_[1][ndx] = Kacty * scale_.epsilon;\n\n\t\t}\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\", "context_chars": 100, "text": "lquants_ptr->TimeInts[iloc].resize(0);\n \t}\n\n tivector &kactxy=effect_kactxy_[0];\n\n reduction(+: m_force_transx, m_force_transy, m_force_conx, m_force_cony, m_force_gx, m_force_gy) \\\n\t\treduction(+: m_force_bx, m_force_by, m_force_bcx, m_force_bcy, m_force_rx, m_force_ry) \\\n\t\treduction(+: m_power_trans, m_power_con, m_power_g, m_power_b, m_power_bc, m_power_r, m_Fr, m_vol)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > localquants_ptr->thr)\n {\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar;\n\n double fluxxp[MAX_NUM_STATE_VARS], fluxyp[MAX_NUM_STATE_VARS];\n double fluxxm[MAX_NUM_STATE_VARS], fluxym[MAX_NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n 
fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n double VxVy[2];\n double tanbed = tan(bedfrictang[ndx]);\n\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n double speed, h_inv;\n double forcetransx, forcetransy;\n double forceconvectx, forceconvecty;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcegravx , forcegravy;\n double unitvx, unitvy, Local_Fr;\n double sgn_dudy, sgn_dvdx, tmp;\n\n // initialize to zero\n forcetransx = forcetransy = 0.0;\n forceconvectx = forceconvecty = 0.0;\n forceintx = forcebedx = 0.0;\n forcebedx_curv = forcebedy_curv = 0.0;\n forceinty = forcebedy = 0.0;\n unitvx = unitvy = 0.0;\n\n // S terms\n // here speed is speed squared\n speed = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n\n if (speed > 0.0)\n {\n // here speed is speed\n speed = sqrt(speed);\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = unitvy = 0.0;\n }\n h_inv = 1.0 / h[ndx];\n Local_Fr = speed / sqrt( g[2][ndx] * h[ndx] * scale_.epsilon);\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // x direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the x direction\n forcetransx = (hVx[ndx] - prev_state_vars_[1][ndx]) / dt;\n\n // the convective in x direction\n forceconvectx = (fluxxp[1] - fluxxm[1]) / dx_[0][ndx] + (fluxyp[1] - fluxym[1]) / dx_[1][ndx];\n\n // the gravity force in the x direction\n forcegravx = g[0][ndx] * h[ndx];\n\n // the internal friction force\n tmp = h_inv * (dhVx_dy[ndx] - VxVy[0] * dh_dy[ndx]);\n sgn_dudy = sgn_tiny(tmp, frict_tiny);\n forceintx = sgn_dudy * h[ndx] * kactxy[ndx] * (g[2][ndx] * dh_dy[ndx] + dgdx[1][ndx] * h[ndx]) * sin_intfrictang;\n\n // the bed friction force for fast moving flow\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedx = unitvx * tanbed * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * tanbed * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // y direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the y direction\n forcetransy = (hVy[ndx] - prev_state_vars_[2][ndx]) / dt;\n\n // the convective in y direction\n forceconvecty = (fluxxp[2] - fluxxm[2]) / dx_[0][ndx] + (fluxyp[2] - fluxym[2]) / dx_[1][ndx];\n\n // the gravity force in the y direction\n forcegravy = g[1][ndx] * h[ndx];\n\n // the internal friction force\n tmp = h_inv * (dhVy_dx[ndx] - VxVy[1] * dh_dx[ndx]);\n sgn_dvdx = sgn_tiny(tmp, frict_tiny);\n forceinty = sgn_dvdx * h[ndx] * kactxy[ndx] * (g[2][ndx] * dh_dx[ndx] + dgdx[0][ndx] * h[ndx]) * sin_intfrictang;\n\n // the bed friction force for fast moving flow\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedy = unitvy * tanbed * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * tanbed * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n\n 
/////////////////////////////////////////////////////////////////////////////\n // Recording QoIs globally from updated cells\n m_force_transx += (forcetransx * dxdy);\n m_force_transy += (forcetransy * dxdy);\n m_force_conx += (forceconvectx * dxdy);\n m_force_cony += (forceconvecty * dxdy);\n\t\t\tm_force_gx += (forcegravx * dxdy);\n\t\t\tm_force_gy += (forcegravy * dxdy);\n\t\t\tm_force_bx -= (forcebedx * dxdy);\n\t\t\tm_force_by -= (forcebedy * dxdy);\n\t\t\tm_force_bcx -= (forcebedx_curv * dxdy);\n\t\t\tm_force_bcy -= (forcebedy_curv * dxdy);\n\t\t\tm_force_rx -= (forceintx * dxdy);\n\t\t\tm_force_ry -= (forceinty * dxdy);\n\n\t\t\tm_power_trans += (forcetransx * VxVy[0] + forcetransy * VxVy[1]) * dxdy;\n\t\t\tm_power_con += (forceconvectx * VxVy[0] + forceconvecty * VxVy[1]) * dxdy;\n\t\t\tm_power_g += (forcegravx * VxVy[0] + forcegravy * VxVy[1]) * dxdy;\n\t\t\tm_power_b -= (forcebedx * VxVy[0] + forcebedy * VxVy[1]) * dxdy;\n\t\t\tm_power_bc -= (forcebedx_curv * VxVy[0] + forcebedy_curv * VxVy[1]) * dxdy;\n\t\t\tm_power_r -= (forceintx * VxVy[0] + forceinty * VxVy[1]) * dxdy;\n\n\t\t m_Fr += (Local_Fr * dxdy * h[ndx]);\n\t\t m_vol += dxdy * h[ndx];\n\n\t\t\t// Searching user-defined locations to record QoIs\n\t\t\tif (localquants_ptr->no_locations > 0)\n\t\t\t\tlocalquants_ptr->FindElement(dt, dx_[0][ndx], dx_[1][ndx],\n\t\t\t\t\t\tcoord_[0][ndx], coord_[1][ndx], h[ndx], hVx[ndx],\n\t\t\t\t\t\thVy[ndx], forcetransx, forcetransy, forceconvectx,\n\t\t\t\t\t\tforceconvecty, forcegravx, forcegravy, -forcebedx,\n\t\t\t\t\t\t-forcebedy, -forcebedx_curv, -forcebedy_curv, -forceintx,\n\t\t\t\t\t\t-forceinty, zeta_[0][ndx], zeta_[1][ndx], Local_Fr);\n\t\t}\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\", "context_chars": 100, "text": "me = 0.0;\n\n double inv_xi= 1.0/xi;\n\n //convinience ref\n tivector *g=gravity_;\n\n reduction(+: m_forceint, m_forcebed, m_eroded, m_deposited, m_realvolume)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n //if first order states was not updated as there is no predictor\n if(order==1)\n {\n for (int i = 0; i < NUM_STATE_VARS; i++)\n prev_state_vars_[i][ndx]=state_vars_[i][ndx];\n }\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double elem_forceint;\n double elem_forcebed;\n double elem_eroded;\n double elem_deposited;\n\n\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar, j, k;\n\n double fluxxp[NUM_STATE_VARS], fluxyp[NUM_STATE_VARS];\n double fluxxm[NUM_STATE_VARS], fluxym[NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < 
NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n double VxVy[2];\n if(h[ndx] > tiny)\n {\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n }\n else\n {\n VxVy[0] = VxVy[1] = 0.0;\n }\n\n elements_[ndx].convect_dryline(VxVy[0], VxVy[1], dt); //this is necessary\n\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //corrector itself\n double speed, speed_squared;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcegravx,forcegravy;\n double unitvx, unitvy, tmp;\n double Ustore[3];\n double inertial_x, inertial_y;\n double drag_x, drag_y;\n\n Ustore[0] = prev_state_vars_[0][ndx]\n - dtdx * (fluxxp[0] - fluxxm[0])\n - dtdy * (fluxyp[0] - fluxym[0])\n + dt * Influx_[0][ndx];\n Ustore[0] = c_dmax1(Ustore[0], 0.0);\n\n Ustore[1] = prev_state_vars_[1][ndx]\n - dtdx * (fluxxp[1] - fluxxm[1])\n - dtdy * (fluxyp[1] - fluxym[1])\n + dt * Influx_[1][ndx];\n\n Ustore[2] = prev_state_vars_[2][ndx]\n - dtdx * (fluxxp[2] - fluxxm[2])\n - dtdy * (fluxyp[2] - fluxym[2])\n + dt * Influx_[2][ndx];\n\n // initialize to zero\n forceintx = 0.0;\n forcebedx = 0.0;\n forcebedx_curv = 0.0;\n forcebedy_curv = 0.0;\n forceinty = 0.0;\n forcebedy = 0.0;\n unitvx = 0.0;\n unitvy = 0.0;\n elem_eroded = 0.0;\n\n double total_gravity = sqrt(g[0][ndx]*g[0][ndx] + g[1][ndx]*g[1][ndx] + g[2][ndx]*g[2][ndx]);\n\n if(h[ndx] > tiny)\n {\n // S terms\n \tspeed_squared = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n\n if (speed_squared > 0.0)\n {\n\n speed = sqrt(speed_squared);\n\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // x direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // The gravity force in the x direction\n forcegravx = g[0][ndx] * h[ndx];\n\n // The basal type friction force in x direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedx = unitvx * mu * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * mu * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n\n // The velocity-dependent resisting force in x direction\n forceintx = unitvx * total_gravity * speed_squared * inv_xi / scale_.epsilon;\n\n //STOPPING CRITERIA\n inertial_x = fabs( Ustore[1] + dt * forcegravx );\n\n drag_x = fabs( dt * ( forceintx + forcebedx + forcebedx_curv) );\n\n //Ustore[1] = Ustore[1] + dt * (forcegravx - forcebedx - forcebedx_curv - forceintx);\n\n \n\n if ( inertial_x > drag_x )\n \tUstore[1] = Ustore[1] + dt * (forcegravx - forcebedx - forcebedx_curv - forceintx);\n else\n \tUstore[1] = 0.0;\n\n \n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // y direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // The gravity force in the y direction\n forcegravy = g[1][ndx] * h[ndx];\n\n // The basal friction force in y direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedy = unitvy * mu * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * mu * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n\n // The velocity-dependent resisting force in y direction\n forceinty = unitvy * total_gravity * speed_squared * inv_xi / scale_.epsilon;\n\n //STOPPING CRITERIA\n inertial_y = fabs( Ustore[2] + dt * forcegravy );\n\n 
drag_y = fabs( dt * ( forceinty + forcebedy + forcebedy_curv) );\n\n //Ustore[2] = Ustore[2] + dt * (forcegravy - forcebedy - forcebedy_curv - forceinty);\n\n \n\n if ( inertial_y > drag_y )\n \tUstore[2] = Ustore[2] + dt * (forcegravy - forcebedy - forcebedy_curv - forceinty);\n \t else\n \t \tUstore[2] = 0.0;\n\n \n\n }\n\n // computation of magnitude of friction forces for statistics\n elem_forceint = speed_squared * inv_xi / scale_.epsilon;\n elem_forcebed = unitvx * forcebedx + unitvy*forcebedy;\n\n // update the state variables\n h[ndx]=Ustore[0];\n hVx[ndx]=Ustore[1];\n hVy[ndx]=Ustore[2];\n\n //end of correct\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n elem_forceint *= dxdy;\n elem_forcebed *= dxdy;\n elem_eroded *= dxdy;\n\n\n if(stoppedflags_[ndx] == 2)\n elem_deposited = h[ndx] * dxdy;\n else\n elem_deposited = 0.0;\n\n if(stoppedflags_[ndx])\n elem_eroded = 0.0;\n\n elements_[ndx].calc_shortspeed(1.0 / dt);\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n m_forceint += fabs(elem_forceint);\n m_forcebed += fabs(elem_forcebed);\n m_realvolume += dxdy * h[ndx];\n m_eroded += elem_eroded;\n m_deposited += elem_deposited;\n\n\t\t// apply bc's\n\t\tfor (int j = 0; j < 4; j++)\n\t\t\tif (neigh_proc_[j][ndx] == INIT) // this is a boundary!\n\t\t\t\tfor (int k = 0; k < NUM_STATE_VARS; k++)\n\t\t\t\t\tstate_vars_[k][ndx] = 0.0;\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\", "context_chars": 100, "text": "ocalquants_ptr->temps[iloc].resize(0);\n \t\tlocalquants_ptr->TimeInts[iloc].resize(0);\n \t}\n\n reduction(+: m_force_transx, m_force_transy, m_force_conx, m_force_cony, m_force_gx, m_force_gy) \\\n\t\treduction(+: m_force_bx, m_force_by, m_force_bcx, m_force_bcy, m_force_rx, m_force_ry) \\\n\t\treduction(+: m_power_trans, m_power_con, m_power_g, m_power_b, m_power_bc, m_power_r, m_Fr, m_vol)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > localquants_ptr->thr)\n {\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar;\n\n double fluxxp[MAX_NUM_STATE_VARS], fluxyp[MAX_NUM_STATE_VARS];\n double fluxxm[MAX_NUM_STATE_VARS], fluxym[MAX_NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n double VxVy[2];\n\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n 
///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n double speed, speed_squared, tmp;\n double forcetransx, forcetransy;\n double forceconvectx, forceconvecty;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcegravx , forcegravy;\n double unitvx, unitvy, Local_Fr;\n\n // initialize to zero\n forcetransx = forcetransy = 0.0;\n forceintx = forcebedx = 0.0;\n forcebedx_curv = forcebedy_curv = 0.0;\n forceinty = forcebedy = 0.0;\n unitvx = unitvy = 0.0;\n\n // S terms\n // here speed is speed squared\n \tspeed_squared = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n\n if (speed_squared > 0.0)\n {\n\n speed = sqrt(speed_squared);\n\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n\n Local_Fr = speed / sqrt( g[2][ndx] * h[ndx] * scale_.epsilon);\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // x direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the x direction\n forcetransx = (hVx[ndx] - prev_state_vars_[1][ndx]) / dt;\n\n // the convective in x direction\n forceconvectx = (fluxxp[1] - fluxxm[1]) / dx_[0][ndx] + (fluxyp[1] - fluxym[1]) / dx_[1][ndx];\n\n // The gravity force in the x direction\n forcegravx = g[0][ndx] * h[ndx];\n\n // The basal friction force in x direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0) {\n \tforcebedx = unitvx * mu * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * mu * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n\n // The velocity-dependent resisting force in x direction\n forceintx = unitvx * speed_squared * inv_xi / scale_.epsilon;\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // y direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the y direction\n forcetransy = (hVy[ndx] - prev_state_vars_[2][ndx]) / dt;\n\n // the convective in y direction\n forceconvecty = (fluxxp[2] - fluxxm[2]) / dx_[0][ndx] + (fluxyp[2] - fluxym[2]) / dx_[1][ndx];\n\n // The gravity force in the y direction\n forcegravy = g[1][ndx] * h[ndx];\n\n // The basal friction force in y direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0) {\n \tforcebedy = unitvy * mu * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * mu * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n\n // The velocity-dependent resisting force in y direction\n forceinty = unitvy * speed_squared * inv_xi / scale_.epsilon;\n\n /////////////////////////////////////////////////////////////////////////////\n // Recording QoIs globally from updated cells at current time\n m_force_transx += (forcetransx * dxdy);\n m_force_transy += (forcetransy * dxdy);\n m_force_conx += (forceconvectx * dxdy);\n m_force_cony += (forceconvecty * dxdy);\n\t\t\tm_force_gx += (forcegravx * dxdy);\n\t\t\tm_force_gy += (forcegravy * dxdy);\n\t\t\tm_force_bx -= (forcebedx * dxdy);\n\t\t\tm_force_by -= (forcebedy * dxdy);\n\t\t\tm_force_bcx -= (forcebedx_curv * dxdy);\n\t\t\tm_force_bcy -= (forcebedy_curv * dxdy);\n\t\t\tm_force_rx -= (forceintx * dxdy);\n\t\t\tm_force_ry -= (forceinty * dxdy);\n\n\t\t\tm_power_trans += (forcetransx * VxVy[0] + forcetransy * VxVy[1]) * dxdy;\n\t\t\tm_power_con += (forceconvectx * VxVy[0] + forceconvecty * VxVy[1]) * 
dxdy;\n\t\t\tm_power_g += (forcegravx * VxVy[0] + forcegravy * VxVy[1]) * dxdy;\n\t\t\tm_power_b -= (forcebedx * VxVy[0] + forcebedy * VxVy[1]) * dxdy;\n\t\t\tm_power_bc -= (forcebedx_curv * VxVy[0] + forcebedy_curv * VxVy[1]) * dxdy;\n\t\t\tm_power_r -= (forceintx * VxVy[0] + forceinty * VxVy[1]) * dxdy;\n\n\t\t m_Fr += (Local_Fr * dxdy * h[ndx]);\n\t\t m_vol += dxdy * h[ndx];\n\n\t\t\t// Searching user-defined locations to record QoIs\n\t\t\tif (localquants_ptr->no_locations > 0)\n\t\t\t\tlocalquants_ptr->FindElement(dt, dx_[0][ndx], dx_[1][ndx],\n\t\t\t\t\t\tcoord_[0][ndx], coord_[1][ndx], h[ndx], hVx[ndx],\n\t\t\t\t\t\thVy[ndx], forcetransx, forcetransy, forceconvectx,\n\t\t\t\t\t\tforceconvecty, forcegravx, forcegravy, -forcebedx,\n\t\t\t\t\t\t-forcebedy, -forcebedx_curv, -forcebedy_curv, -forceintx,\n\t\t\t\t\t\t-forceinty, zeta_[0][ndx], zeta_[1][ndx], Local_Fr);\n\t\t}\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\", "context_chars": 100, "text": "n(phi2);\n double mu_3 = tan(phi3);\n\n //convinience ref\n tivector *g=gravity_;\n\n reduction(+: m_forceint, m_forcebed, m_eroded, m_deposited, m_realvolume)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n //if first order states was not updated as there is no predictor\n if(order==1)\n {\n for (int i = 0; i < NUM_STATE_VARS; i++)\n prev_state_vars_[i][ndx]=state_vars_[i][ndx];\n }\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double elem_forceint;\n double elem_forcebed;\n double elem_eroded;\n double elem_deposited;\n\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar, j, k;\n\n double fluxxp[NUM_STATE_VARS], fluxyp[NUM_STATE_VARS];\n double fluxxm[NUM_STATE_VARS], fluxym[NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n double VxVy[2];\n if(h[ndx] > tiny)\n {\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n }\n else\n {\n VxVy[0] = VxVy[1] = 0.0;\n }\n\n elements_[ndx].convect_dryline(VxVy[0], VxVy[1], dt); //this is necessary\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //corrector itself\n\n double speed;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcegravx, forcegravy;\n double unitvx, unitvy, tmp;\n double Ustore[3];\n double mu_bed, Local_Fr;\n\n Ustore[0] = prev_state_vars_[0][ndx]\n - dtdx * (fluxxp[0] - fluxxm[0])\n - dtdy * (fluxyp[0] - fluxym[0])\n + dt * 
Influx_[0][ndx];\n Ustore[0] = c_dmax1(Ustore[0], 0.0);\n\n Ustore[1] = prev_state_vars_[1][ndx]\n - dtdx * (fluxxp[1] - fluxxm[1])\n - dtdy * (fluxyp[1] - fluxym[1])\n + dt * Influx_[1][ndx];\n\n Ustore[2] = prev_state_vars_[2][ndx]\n - dtdx * (fluxxp[2] - fluxxm[2])\n - dtdy * (fluxyp[2] - fluxym[2])\n + dt * Influx_[2][ndx];\n\n // initialize to zero\n forceintx = 0.0;\n forcebedx = 0.0;\n forcebedx_curv = 0.0;\n forcebedy_curv = 0.0;\n forceinty = 0.0;\n forcebedy = 0.0;\n unitvx = 0.0;\n unitvy = 0.0;\n elem_eroded = 0.0;\n mu_bed = 0.0;\n \n if(h[ndx] > tiny)\n {\n \tdouble inertial_x,inertial_y,drag_x, drag_y;\n // S terms\n // here speed is speed squared\n speed = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n if (speed > 0.0)\n {\n // here speed is speed\n speed = sqrt(speed);\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n // Calculation of Froude number\n Local_Fr = speed / sqrt( g[2][ndx] * h[ndx] * scale_.epsilon);\n\n //ccccccccccccccc Calculation of mu_bed(Local_Fr,h) ccccccccccccccccc\n\n //Dynamic flow regime\n\t\t\tif ( Local_Fr >= Beta )\n\t\t\t\tmu_bed = mu_1 + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] * Beta / ( L_material * Local_Fr ) );\n\n //Intermediate flow regime\n\t\t\telse if ( ( Local_Fr < Beta ) && ( Local_Fr > 0.0 ) )\n\t\t\t\tmu_bed = mu_3 + pow( ( Local_Fr / Beta ), 0.001 ) * ( mu_1 - mu_3 ) + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] / L_material );\n\n //Static regime\n\t\t\telse if ( Local_Fr == 0.0 )\n\t\t\t\tmu_bed = mu_3 + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] / L_material);\n\n\t\t\t//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\t\t\t// x direction source terms\n\t\t\t//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n\t\t\t// The gravity force in the x direction\n\t\t\tforcegravx = g[0][ndx] * h[ndx];\n\n\t\t\t// The basal friction forces in x direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedx = unitvx * mu_bed * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * mu_bed * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n\n // The resisting forces due to flow thickness gradient in x direction\n\t\t\tforceintx = h[ndx] * g[2][ndx] * dh_dx[ndx] * scale_.epsilon;\n\n\t\t\t//STOPPING CRITERIA\n\t\t\tinertial_x = fabs( Ustore[1] + dt * forcegravx );\n\n\t\t\tdrag_x = fabs( dt * ( forcebedx + forcebedx_curv + forceintx ) );\n\n\t\t\tif ( inertial_x > drag_x )\n\t\t\t\tUstore[1] = Ustore[1] + dt * ( forcegravx - forcebedx - forcebedx_curv - forceintx );\n\t\t\telse\n\t\t\t\tUstore[1] = 0.0;\n\n\t\t\t//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\t\t\t// y direction source terms\n\t\t\t//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n\t\t\t// The gravity force in the y direction\n\t\t\tforcegravy = g[1][ndx] * h[ndx];\n\n\t\t\t// The basal friction forces in y direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedy = unitvy * mu_bed * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * mu_bed * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n\n // The resisting forces due to flow thickness gradient in y direction\n\t\t\tforceinty = h[ndx] * g[2][ndx] * dh_dy[ndx] * scale_.epsilon;\n\n\t\t\t//STOPPING CRITERIA\n\t\t\tinertial_y = fabs( Ustore[2] + dt * forcegravy );\n\n\t\t\tdrag_y = fabs( dt * ( forcebedy + forcebedy_curv + forceinty ) );\n\n\t\t\tif ( inertial_y > drag_y )\n\t\t\t\tUstore[2] = 
Ustore[2] + dt * ( forcegravy - forcebedy - forcebedy_curv - forceinty );\n\t\t\telse\n\t\t\t\tUstore[2] = 0.0;\n }\n\n\n // computation of magnitude of friction forces for statistics\n elem_forceint = unitvx * forceintx + unitvy*forceinty;\n elem_forcebed = unitvx * forcebedx + unitvy*forcebedy;\n\n // update the state variables\n h[ndx]=Ustore[0];\n hVx[ndx]=Ustore[1];\n hVy[ndx]=Ustore[2];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n elem_forceint *= dxdy;\n elem_forcebed *= dxdy;\n elem_eroded *= dxdy;\n\n\n if(stoppedflags_[ndx] == 2)\n elem_deposited = h[ndx] * dxdy;\n else\n elem_deposited = 0.0;\n\n if(stoppedflags_[ndx])\n elem_eroded = 0.0;\n\n elements_[ndx].calc_shortspeed(1.0 / dt);\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\t\tm_forceint += fabs(elem_forceint);\n\t\tm_forcebed += fabs(elem_forcebed);\n\t\tm_realvolume += dxdy * h[ndx];\n\t\tm_eroded += elem_eroded;\n\t\tm_deposited += elem_deposited;\n\n\t\t// apply bc's\n\t\tfor (int j = 0; j < 4; j++)\n\t\t\tif (neigh_proc_[j][ndx] == INIT) // this is a boundary!\n\t\t\t\tfor (int k = 0; k < NUM_STATE_VARS; k++)\n\t\t\t\t\tstate_vars_[k][ndx] = 0.0;\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\", "context_chars": 100, "text": "ocalquants_ptr->temps[iloc].resize(0);\n \t\tlocalquants_ptr->TimeInts[iloc].resize(0);\n \t}\n\n reduction(+: m_force_transx, m_force_transy, m_force_conx, m_force_cony, m_force_gx, m_force_gy) \\\n\t\treduction(+: m_force_bx, m_force_by, m_force_bcx, m_force_bcy, m_force_rx, m_force_ry) \\\n\t\treduction(+: m_power_con, m_power_g, m_power_b, m_power_bc, m_power_r, m_Fr, m_vol)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > localquants_ptr->thr)\n {\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar;\n\n double fluxxp[MAX_NUM_STATE_VARS], fluxyp[MAX_NUM_STATE_VARS];\n double fluxxm[MAX_NUM_STATE_VARS], fluxym[MAX_NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n double VxVy[2];\n\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double speed;\n double forcetransx, forcetransy;\n double forceconvectx, forceconvecty;\n double forceintx, forceinty;\n double forcebedx, 
forcebedy;\n double forcebedx_curv, forcebedy_curv;\n double forcegravx , forcegravy;\n double unitvx, unitvy, tmp;\n double mu_bed, Local_Fr;\n\n // initialize to zero\n forcetransx = forcetransy = 0.0;\n forceconvectx = forceconvecty = 0.0;\n forceintx = forcebedx = 0.0;\n forcebedx_curv = forcebedy_curv = 0.0;\n forceinty = forcebedy = 0.0;\n unitvx = unitvy = 0.0;\n\n // S terms\n // here speed is speed squared\n \tspeed = VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1];\n\n if (speed > 0.0)\n {\n speed = sqrt(speed);\n\n unitvx = VxVy[0] / speed;\n unitvy = VxVy[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n Local_Fr = speed / sqrt( g[2][ndx] * h[ndx] * scale_.epsilon);\n\n //ccccccccccccccc Calculation of mu_bed(Local_Fr,h) ccccccccccccccccc\n\n //Dynamic flow regime\n\t\t\tif ( Local_Fr >= Beta )\n\t\t\t\tmu_bed = mu_1 + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] * Beta / ( L_material * Local_Fr ) );\n\n //Intermediate flow regime\n\t\t\telse if ( ( Local_Fr < Beta ) && ( Local_Fr > 0.0 ) )\n\t\t\t\tmu_bed = mu_3 + pow( ( Local_Fr / Beta ), 0.001 ) * ( mu_1 - mu_3 ) + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] / L_material );\n\n //Static regime\n\t\t\telse if ( Local_Fr == 0.0 )\n\t\t\t\tmu_bed = mu_3 + ( mu_2 - mu_1 ) / ( 1.0 + h[ndx] / L_material);\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // x direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the x direction\n forcetransx = (hVx[ndx] - prev_state_vars_[1][ndx]) / dt;\n\n // the convective in x direction\n forceconvectx = (fluxxp[1] - fluxxm[1]) / dx_[0][ndx] + (fluxyp[1] - fluxym[1]) / dx_[1][ndx];\n\n\t\t\t// The gravity force in the x direction\n\t\t\tforcegravx = g[0][ndx] * h[ndx];\n\n\t\t\t// The basal friction forces in x direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[0] * hVx[ndx] * curvature_[0][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedx = unitvx * mu_bed * g[2][ndx] * h[ndx];\n \tforcebedx_curv = unitvx * mu_bed * VxVy[0] * hVx[ndx] * curvature_[0][ndx];\n }\n\n // The resisting forces due to flow thickness gradient in x direction\n\t\t\tforceintx = h[ndx] * g[2][ndx] * dh_dx[ndx] * scale_.epsilon;\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // y direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n\n // the transient force in the y direction\n forcetransy = (hVy[ndx] - prev_state_vars_[2][ndx]) / dt;\n\n // the convective in y direction\n forceconvecty = (fluxxp[2] - fluxxm[2]) / dx_[0][ndx] + (fluxyp[2] - fluxym[2]) / dx_[1][ndx];\n\n\t\t\t// The gravity force in the y direction\n\t\t\tforcegravy = g[1][ndx] * h[ndx];\n\n\t\t\t// The basal friction forces in y direction\n tmp = c_dmax1(g[2][ndx] * h[ndx] + VxVy[1] * hVy[ndx] * curvature_[1][ndx], 0.0);\n if (tmp > 0.0){\n \tforcebedy = unitvy * mu_bed * g[2][ndx] * h[ndx];\n \tforcebedy_curv = unitvy * mu_bed * VxVy[1] * hVy[ndx] * curvature_[1][ndx];\n }\n\n // The resisting forces due to flow thickness gradient in y direction\n\t\t\tforceinty = h[ndx] * g[2][ndx] * dh_dy[ndx] * scale_.epsilon;\n\n /////////////////////////////////////////////////////////////////////////////\n // Recording QoIs globally from updated cells at current time\n m_force_transx += (forcetransx * dxdy);\n m_force_transy += (forcetransy * dxdy);\n m_force_conx += (forceconvectx * dxdy);\n m_force_cony += (forceconvecty * dxdy);\n\t\t\tm_force_gx += (forcegravx * dxdy);\n\t\t\tm_force_gy += (forcegravy * 
dxdy);\n\t\t\tm_force_bx -= (forcebedx * dxdy);\n\t\t\tm_force_by -= (forcebedy * dxdy);\n\t\t\tm_force_bcx -= (forcebedx_curv * dxdy);\n\t\t\tm_force_bcy -= (forcebedy_curv * dxdy);\n\t\t\tm_force_rx -= (forceintx * dxdy);\n\t\t\tm_force_ry -= (forceinty * dxdy);\n\n\t\t\tm_power_trans += (forcetransx * VxVy[0] + forcetransy * VxVy[1]) * dxdy;\n\t\t\tm_power_con += (forceconvectx * VxVy[0] + forceconvecty * VxVy[1]) * dxdy;\n\t\t\tm_power_g += (forcegravx * VxVy[0] + forcegravy * VxVy[1]) * dxdy;\n\t\t\tm_power_b -= (forcebedx * VxVy[0] + forcebedy * VxVy[1]) * dxdy;\n\t\t\tm_power_bc -= (forcebedx_curv * VxVy[0] + forcebedy_curv * VxVy[1]) * dxdy;\n\t\t\tm_power_r -= (forceintx * VxVy[0] + forceinty * VxVy[1]) * dxdy;\n\n\t\t m_Fr += (Local_Fr * dxdy * h[ndx]);\n\t\t m_vol += dxdy * h[ndx];\n\n\t\t\t// Searching user-defined locations to record QoIs\n\t\t\tif (localquants_ptr->no_locations > 0)\n\t\t\t\tlocalquants_ptr->FindElement(dt, dx_[0][ndx], dx_[1][ndx],\n\t\t\t\t\t\tcoord_[0][ndx], coord_[1][ndx], h[ndx], hVx[ndx],\n\t\t\t\t\t\thVy[ndx], forcetransx, forcetransy, forceconvectx,\n\t\t\t\t\t\tforceconvecty, forcegravx, forcegravy, -forcebedx,\n\t\t\t\t\t\t-forcebedy, -forcebedx_curv, -forcebedy_curv, -forceintx,\n\t\t\t\t\t\t-forceinty, zeta_[0][ndx], zeta_[1][ndx], Local_Fr);\n\t\t}\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_MIDIUM_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\", "context_chars": 100, "text": " a dependency in the Element class that causes incorrect\n // results\n reduction(+: m_forceint, m_forcebed, m_eroded, m_deposited, m_realvolume)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n //if first order states was not updated as there is no predictor\n if(order==1)\n {\n for (int i = 0; i < NUM_STATE_VARS; i++)\n prev_state_vars_[i][ndx]=state_vars_[i][ndx];\n }\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n double elem_forceint;\n double elem_forcebed;\n double elem_eroded;\n double elem_deposited;\n\n double dxdy = dx_[0][ndx] * dx_[1][ndx];\n double dtdx = dt / dx_[0][ndx];\n double dtdy = dt / dx_[1][ndx];\n\n int xp = positive_x_side_[ndx];\n int yp = (xp + 1) % 4;\n int xm = (xp + 2) % 4;\n int ym = (xp + 3) % 4;\n\n int ivar, j, k;\n\n double fluxxp[NUM_STATE_VARS], fluxyp[NUM_STATE_VARS];\n double fluxxm[NUM_STATE_VARS], fluxym[NUM_STATE_VARS];\n\n\n ti_ndx_t nxp = node_key_ndx_[xp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxp[ivar] = node_flux_[ivar][nxp];\n\n ti_ndx_t nyp = node_key_ndx_[yp + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxyp[ivar] = node_flux_[ivar][nyp];\n\n ti_ndx_t nxm = node_key_ndx_[xm + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxxm[ivar] = node_flux_[ivar][nxm];\n\n ti_ndx_t nym = node_key_ndx_[ym + 4][ndx];\n for(ivar = 0; ivar < NUM_STATE_VARS; ivar++)\n fluxym[ivar] = node_flux_[ivar][nym];\n\n\n /* the values being passed to correct are for a SINGLE element, NOT a\n region, as such the only change that having variable bedfriction\n requires is to pass the bedfriction angle for the current element\n rather than the only bedfriction\n I wonder if this is legacy code, it seems odd that it is only called\n for 
the SUN Operating System zee ../geoflow/correct.f */\n\n//#ifdef STOPPED_FLOWS\n #ifdef STOPCRIT_CHANGE_SOURCE\n int IF_STOPPED=stoppedflags_[ndx];\n #else\n int IF_STOPPED = !(!stoppedflags_[ndx]);\n \n//\n double g[3]{gravity_[0][ndx],gravity_[1][ndx],gravity_[2][ndx]};\n double d_g[3]{d_gravity_[0][ndx],d_gravity_[1][ndx],d_gravity_[2][ndx]};\n\n\n int i;\n double kactxy[DIMENSION];\n double bedfrict = effect_bedfrict_[ndx];\n double Vfluid[DIMENSION];\n double volf;\n\n\n if(h[ndx] > GEOFLOW_TINY)\n {\n for(i = 0; i < DIMENSION; i++)\n kactxy[i] = effect_kactxy_[i][ndx];\n\n // fluid velocities\n Vfluid[0] = hVx_liq[ndx] / h[ndx];\n Vfluid[1] = hVy_liq[ndx] / h[ndx];\n\n // volume fractions\n volf = h_liq[ndx] / h[ndx];\n }\n else\n {\n for(i = 0; i < DIMENSION; i++)\n {\n kactxy[i] = matprops2_ptr->scale.epsilon;\n Vfluid[i] = 0.;\n }\n volf = 1.;\n bedfrict = matprops2_ptr->bedfrict[material_[ndx]];\n }\n\n double Vsolid[DIMENSION];\n if(h_liq[ndx] > GEOFLOW_TINY)\n {\n Vsolid[0] = hVx_sol[ndx] / h_liq[ndx];\n Vsolid[1] = hVy_sol[ndx] / h_liq[ndx];\n }\n else\n {\n Vsolid[0] = Vsolid[1] = 0.0;\n }\n\n double V_avg[DIMENSION];\n V_avg[0] = Vsolid[0] * volf + Vfluid[0] * (1. - volf);\n V_avg[1] = Vsolid[1] * volf + Vfluid[1] * (1. - volf);\n elements_[ndx].convect_dryline(V_avg[0],V_avg[1], dt); //this is necessary\n\n double curv_x=curvature_[0][ndx];\n double curv_y=curvature_[1][ndx];\n double xslope=zeta_[0][ndx];\n double yslope=zeta_[1][ndx];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //corrector\n double speed;\n double forceintx, forceinty;\n double forcebedx, forcebedy;\n double forcebedmax, forcebedequil, forcegrav;\n double unitvx, unitvy;\n double den_frac;\n double alphaxx, alphayy, alphaxy, alphaxz, alphayz;\n double tanbed;\n\n double Ustore[6];\n double h_inv, hphi_inv;\n double sgn_dudy, sgn_dvdx, tmp;\n double slope;\n double t1, t2, t3, t4, t5;\n double es, totalShear;\n double drag[4];\n\n // initialize to zero\n forceintx = 0.0;\n forcebedx = 0.0;\n forceinty = 0.0;\n forcebedy = 0.0;\n unitvx = 0.0;\n unitvy = 0.0;\n elem_eroded = 0.0;\n\n slope = sqrt(xslope * xslope + yslope * yslope);\n den_frac = den_fluid / den_solid;\n for (i = 0; i < 6; ++i)\n Ustore[i] = prev_state_vars_[i][ndx] + dt * Influx_[i][ndx]\n - dtdx * (fluxxp[i] - fluxxm[i])\n - dtdy * (fluxyp[i] - fluxym[i]);\n\n if (Ustore[0] > tiny)\n {\n // Source terms ...\n // here speed is speed squared\n speed = Vsolid[0] * Vsolid[0] + Vsolid[1] * Vsolid[1];\n if (speed > 0.0)\n {\n // here speed is speed\n speed = sqrt(speed);\n unitvx = Vsolid[0] / speed;\n unitvy = Vsolid[1] / speed;\n }\n else\n {\n unitvx = 0.0;\n unitvy = 0.0;\n }\n tanbed = tan(bedfrict);\n h_inv = 1.0 / h[ndx];\n hphi_inv = 1.0 / h_liq[ndx];\n alphaxx = kactxy[0];\n alphayy = kactxy[0];\n den_frac = den_fluid / den_solid;\n calc_drag_force(ndx, Vsolid, Vfluid, den_solid, den_fluid,terminal_vel, drag);\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // solid fraction x-direction source terms\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // alphaxy -- see pitman-le (2005)\n tmp = hphi_inv * (dhVx_sol_dy[ndx] - Vsolid[0] * dh_liq_dy[ndx]);\n sgn_dudy = sgn_tiny(tmp, frict_tiny);\n alphaxy = sgn_dudy * sin_intfrictang * kactxy[0];\n\n // alphaxz (includes centrifugal effects)\n alphaxz = -unitvx * tanbed\n * (1.0 + (Vsolid[0] * Vsolid[0]) * curv_x / g[2]);\n\n // evaluate t1\n t1 = (1.0 - den_frac)\n * 
(-alphaxx * xslope - alphaxy * yslope + alphaxz)\n * h_liq[ndx] * g[2];\n // evaluate t2\n t2 = epsilon * den_frac * h_liq[ndx] * g[2] * dh_dx[ndx];\n // evaluate t3\n t3 = epsilon * den_frac * h_liq[ndx] * g[2] * xslope;\n // evaluate t4\n t4 = h_liq[ndx] * g[0];\n // evaluate drag\n t5 = drag[0];\n // update Ustore\n Ustore[2] = Ustore[2] + dt * (t1 - t2 - t3 + t4 + t5);\n\n //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // solid fraction y-direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // alphaxy -- see pitman-le (2005) for definitions\n tmp = hphi_inv * (dhVy_sol_dx[ndx] - Vsolid[1] * dh_liq_dx[ndx]);\n sgn_dvdx = sgn_tiny(tmp, frict_tiny);\n alphaxy = sgn_dvdx * sin_intfrictang * kactxy[0];\n\n // alphayz\n alphayz = -unitvy * tanbed\n * (1.0 + (Vsolid[1] * Vsolid[1]) * curv_y / g[2]);\n\n // evaluate t1\n t1 = (1.0 - den_frac)\n * (-alphaxy * xslope - alphayy * yslope + alphayz)\n * h_liq[ndx] * g[2];\n // evaluate t2\n t2 = epsilon * den_frac * h_liq[ndx] * dh_dy[ndx];\n // evaluate t3\n t3 = epsilon * den_frac * h_liq[ndx] * g[2] * yslope;\n // evaluate t4 ( gravity along y-dir )\n t4 = h_liq[ndx] * g[1];\n // drag term\n t5 = drag[1];\n Ustore[3] = Ustore[3] + dt * (t1 - t2 - t3 + t4 + t5);\n\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // fluid fraction x-direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // gravity on fluid\n t4 = h[ndx] * g[0];\n // drag force on fluid\n t5 = drag[2];\n Ustore[4] = Ustore[4] + dt * (t4 - t5);\n\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // fluid fraction y-direction source terms\n //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n // gravity on fluid\n t4 = h[ndx] * g[1];\n // drag force on fluid\n t5 = drag[3];\n Ustore[5] = Ustore[5] + dt * (t4 - t5);\n }\n\n // computation of magnitude of friction forces for statistics\n elem_forceint = unitvx * forceintx + unitvy * forceinty;\n elem_forcebed = unitvx * forcebedx + unitvy * forcebedy;\n\n // update the state variables\n for (i = 0; i < 6; ++i)\n state_vars_[i][ndx]=Ustore[i];\n\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n elem_forceint *= dxdy;\n elem_forcebed *= dxdy;\n elem_eroded *= dxdy;\n\n\n if(stoppedflags_[ndx] == 2)\n elem_deposited = h[ndx] * dxdy;\n else\n elem_deposited = 0.0;\n\n if(stoppedflags_[ndx])\n elem_eroded = 0.0;\n\n elements_[ndx].calc_shortspeed(1.0 / dt);\n\n\n //correct(elementType, NodeTable, ElemTable, dt, matprops_ptr, fluxprops_ptr, timeprops_ptr, this, Curr_El_out, &elemforceint,\n // &elemforcebed, &elemeroded, &elemdeposited);\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\n m_forceint += fabs(elem_forceint);\n m_forcebed += fabs(elem_forcebed);\n m_realvolume += dxdy * h[ndx];\n m_eroded += elem_eroded;\n m_deposited += elem_deposited;\n\n // apply bc's\n for(int j = 0; j < 4; j++)\n if(neigh_proc_[j][ndx] == INIT) // this is a boundary!\n for(int k = 0; k < NUM_STATE_VARS; k++)\n state_vars_[k][ndx]=0.0;\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/integrators.C", "omp_pragma_line": "#pragma omp parallel for \\", "context_chars": 100, "text": ";\n double VxVy[2];\n double dt2 = .5 * dt; // dt2 
is set as dt/2 !\n Element* Curr_El;\n ////private(currentPtr,Curr_El,IF_STOPPED,influx,j,k,curr_time,flux_src_coef,VxVy)\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n Curr_El = &(elements_[ndx]);\n elements_[ndx].update_prev_state_vars();\n influx[0] = Curr_El->Influx(0);\n influx[1] = Curr_El->Influx(1);\n influx[2] = Curr_El->Influx(2);\n //note, now there is no check for fluxes from non-local elements\n if(!(influx[0] >= 0.0))\n {\n printf(\"negative influx=%g\\n\", influx[0]);\n assert(0);\n }\n // -- calc contribution of flux source\n curr_time = (timeprops_ptr->cur_time) * (timeprops_ptr->TIME_SCALE);\n //VxVy[2];\n if(Curr_El->state_vars(0) > GEOFLOW_TINY)\n {\n VxVy[0] = Curr_El->state_vars(1) / Curr_El->state_vars(0);\n VxVy[1] = Curr_El->state_vars(2) / Curr_El->state_vars(0);\n }\n else\n VxVy[0] = VxVy[1] = 0.0;\n#ifdef STOPCRIT_CHANGE_SOURCE\n IF_STOPPED=Curr_El->stoppedflags();\n#else\n IF_STOPPED = !(!(Curr_El->stoppedflags()));\n\n double gravity[3]{Curr_El->gravity(0),Curr_El->gravity(1),Curr_El->gravity(2)};\n double d_gravity[3]{Curr_El->d_gravity(0),Curr_El->d_gravity(1),Curr_El->d_gravity(2)};\n if(elementType == ElementType::TwoPhases)\n {\n //nothing there\n }\n if(elementType == ElementType::SinglePhase)\n {\n predict(Curr_El,\n Curr_El->dh_dx(), Curr_El->dhVx_dx(), Curr_El->dhVy_dx(),\n Curr_El->dh_dy(), Curr_El->dhVx_dy(), Curr_El->dhVy_dy(),\n tiny, Curr_El->kactxy(0), dt2, gravity, Curr_El->curvature(0), Curr_El->curvature(1),\n matprops_ptr->bedfrict[Curr_El->material()], matprops_ptr->intfrict,\n d_gravity, matprops_ptr->frict_tiny, order, VxVy, IF_STOPPED, influx);\n }\n // apply bc's\n for(int j = 0; j < 4; j++)\n if(Curr_El->neigh_proc(j) == INIT) // this is a boundary!\n for(int k = 0; k < NUM_STATE_VARS; k++)\n Curr_El->state_vars(k,0.0);\n } #pragma omp parallel for \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/stats.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\", "context_chars": 100, "text": "NOTATE_SITE_BEGIN(StatProps_calc_stats);\n //ANNOTATE_TASK_BEGIN(StatProps_calc_stats_loop);\n\n reduction(min:m_x_min,m_y_min,testpointmindist2) \\\n reduction(max:m_x_max,m_y_max,m_v_max,testpointreach) \\\n reduction(max:m_max_height) \\\n reduction(+:xC,yC,rC,m_area,m_v_ave,m_vx_ave,m_vy_ave)\\\n\t\treduction(+:m_Vol_,m_Area_,m_Velmean_) \\\n reduction(+:m_piler2,testvolume,m_xVar,m_yVar) /*m_slope_ave,m_slopevolume*/\n for(ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n if(adapted_[ndx] > 0 && myid == myprocess_[ndx])\n {\n Element* Curr_El = &(ElemTable->elenode_[ndx]);\n if(discharge->num_planes>0)\n {\n //calculate volume passing through \"discharge planes\"\n double nodescoord[9][2];\n Node* node;\n\n for(int inode = 0; inode < 8; inode++)\n {\n nodescoord[inode][0] = coord_[0][node_key_ndx_[inode][ndx]];\n nodescoord[inode][1] = coord_[1][node_key_ndx_[inode][ndx]];\n }\n nodescoord[8][0] = coord_[0][ndx];\n nodescoord[8][1] = coord_[1][ndx];\n\n discharge->update(nodescoord, hVx[ndx], hVy[ndx], d_time);\n }\n\n // rule out non physical fast moving thin layers\n //if(state_vars_[0][ndx] >= cutoffheight){\n\n if(h[ndx] > min_height)\n {\n\n if(h[ndx] > m_max_height)\n m_max_height = h[ndx];\n\n if(h[ndx] >= hxyminmax)\n {\n m_x_min = min(m_x_min,coord_[0][ndx]);\n m_x_max = max(m_x_max,coord_[0][ndx]);\n m_y_min = min(m_y_min,coord_[1][ndx]);\n m_y_max = max(m_y_max,coord_[1][ndx]);\n }\n\n //to test if pileheight of depth testpointheight\n 
//has reached the testpoint\n double testpointdist2 = (coord_[0][ndx] - testpointx) * (coord_[0][ndx] - testpointx)\n + (coord_[1][ndx] - testpointy) * (coord_[1][ndx] - testpointy);\n\n if(testpointdist2 < testpointmindist2)\n {\n testpointmindist2 = testpointdist2;\n testpointreach = h[ndx] >= testpointheight;\n }\n\n double dA = dx_[0][ndx] * dx_[1][ndx];\n m_area += dA;\n double dVol = state_vars_[0][ndx] * dA;\n testvolume += dVol;\n xC += coord_[0][ndx] * dVol;\n yC += coord_[1][ndx] * dVol;\n\n m_xVar += coord_[0][ndx] * coord_[0][ndx] * dVol;\n m_yVar += coord_[1][ndx] * coord_[1][ndx] * dVol;\n m_piler2 += (coord_[0][ndx] * coord_[0][ndx] + coord_[1][ndx] * coord_[1][ndx]) * dVol;\n rC += sqrt((coord_[0][ndx] - m_xCen) * (coord_[0][ndx] - m_xCen) + (coord_[1][ndx] - m_yCen) * (coord_[1][ndx] - m_yCen)) * dVol;\n\n\n m_v_ave += sqrt(hVx[ndx] * hVx[ndx] + hVy[ndx] * hVy[ndx]) * dA;\n\n double VxVy[2];\n Curr_El->eval_velocity(0.0, 0.0, VxVy);\n\n if(elementType == ElementType::TwoPhases)\n {\n if(std::isnan(m_v_ave)||std::isnan(state_vars_[0][ndx])||std::isnan(state_vars_[1][ndx])||std::isnan(state_vars_[2][ndx])\n ||std::isnan(state_vars_[3][ndx])||std::isnan(state_vars_[4][ndx])||std::isnan(state_vars_[5][ndx]))\n {\n //v_ave is NaN\n cout<<\"calc_stats(): NaN detected in element={\"<key_[ndx]<<\"} at iter=\"<iter<<\"\\n\";\n printf(\"prevu={%12.6g,%12.6g,%12.6g,%12.6g,%12.6g,%12.6g}\\n\",\n prev_state_vars_[0][ndx], prev_state_vars_[1][ndx],\n prev_state_vars_[2][ndx], prev_state_vars_[3][ndx],\n prev_state_vars_[4][ndx], prev_state_vars_[5][ndx]);\n printf(\" u={%12.6g,%12.6g,%12.6g,%12.6g,%12.6g,%12.6g}\\n\", state_vars_[0][ndx], state_vars_[1][ndx],\n state_vars_[2][ndx], state_vars_[3][ndx], state_vars_[4][ndx], state_vars_[5][ndx]);\n printf(\"prev {Vx_s, Vy_s, Vx_f, Vy_f}={%12.6g,%12.6g,%12.6g,%12.6g}\\n\",\n prev_state_vars_[2][ndx] / (prev_state_vars_[1][ndx]),\n prev_state_vars_[3][ndx] / (prev_state_vars_[1][ndx]),\n prev_state_vars_[4][ndx] / (prev_state_vars_[0][ndx]),\n prev_state_vars_[5][ndx] / (prev_state_vars_[0][ndx]));\n printf(\"this {Vx_s, Vy_s, Vx_f, Vy_f}={%12.6g,%12.6g,%12.6g,%12.6g}\\n\",\n state_vars_[2][ndx] / state_vars_[1][ndx], state_vars_[3][ndx] / state_vars_[1][ndx],\n state_vars_[4][ndx] / state_vars_[0][ndx], state_vars_[5][ndx] / state_vars_[0][ndx]);\n ElemBackgroundCheck2(ElemTable, NodeTable, &(ElemTable->elenode_[ndx]), stdout);\n assert(0);\n }\n }\n if(elementType == ElementType::SinglePhase)\n {\n if(std::isnan(m_v_ave)||std::isnan(state_vars_[0][ndx])||std::isnan(state_vars_[1][ndx])||std::isnan(state_vars_[2][ndx]))\n {\n //v_ave is NaN\n\n cout<<\"calc_stats(): NaN detected in element={\"<key_[ndx]<<\"} at iter=\"<iter<<\"\\n\";\n printf(\"prevu={%12.6g,%12.6g,%12.6g}\\n\", prev_state_vars_[0][ndx],\n prev_state_vars_[1][ndx], prev_state_vars_[2][ndx]);\n printf(\" u={%12.6g,%12.6g,%12.6g}\\n\", state_vars_[0][ndx], state_vars_[1][ndx], state_vars_[2][ndx]);\n printf(\"prev {hVx/h,hVy/h}={%12.6g,%12.6g}\\n\",\n prev_state_vars_[1][ndx] / prev_state_vars_[0][ndx],\n prev_state_vars_[2][ndx] / prev_state_vars_[0][ndx]);\n printf(\"this {hVx/h,hVy/h}={%12.6g,%12.6g}\\n\", state_vars_[1][ndx] / state_vars_[0][ndx],\n state_vars_[2][ndx] / state_vars_[0][ndx]);\n ElemBackgroundCheck2(ElemTable, NodeTable, &(ElemTable->elenode_[ndx]), stdout);\n assert(0);\n }\n }\n\n\n double temp = sqrt(VxVy[0] * VxVy[0] + VxVy[1] * VxVy[1]);\n m_v_max = max(m_v_max,temp);\n m_vx_ave += hVx[ndx] * dA;\n m_vy_ave += hVy[ndx] * dA;\n\n\n\n //these are 
garbage, Bin Yu wanted them when he was trying to come up\n //with a global stopping criteria (to stop the calculation, not the pile)\n //volume averaged slope in the direction of velocity\n //a negative number means the flow is headed uphill\n /*double xslope = 0, yslope = 0;\n\n Get_slope(resolution, coord_[0][ndx] * matprops->scale.length,\n coord_[1][ndx] * matprops->scale.length, xslope, yslope);\n if(temp > GEOFLOW_TINY)\n {\n m_slope_ave += -(hVx[ndx] * xslope + hVy[ndx] * yslope) * dA / temp;\n m_slopevolume += dVol;\n }*/\n }\n\n if (_LocalQuants->no_locations > 0 && h[ndx] > _LocalQuants->thr)\n {\n \tdA_ = dx_[0][ndx] * dx_[1][ndx];\n \tm_Area_ += dA_;\n \tm_Vol_ += h[ndx] * dA_;\n \tm_Velmean_ += sqrt(hVx[ndx] * hVx[ndx] + hVy[ndx] * hVy[ndx]) * dA_;\n }\n }\n }\n\n //ANNOTATE_TASK_END(StatProps_calc_stats_loop);\n //ANNOTATE_SITE_END(StatProps_calc_stats);\n#ifdef USE_MPI\n MPI_Reduce(&m_x_min, xyminmax, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);\n MPI_Reduce(&m_x_max, xyminmax+1, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);\n MPI_Reduce(&m_y_min, xyminmax+2, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);\n MPI_Reduce(&m_y_max, xyminmax+3, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);\n#else //USE_MPI\n xyminmax[0]=m_x_min;\n xyminmax[1]=m_x_max;\n xyminmax[2]=m_y_min;\n xyminmax[3]=m_y_max;\n //USE_MPI\n if(myid == 0)\n {\n xyminmax[0] *= matprops->scale.length;\n xyminmax[1] *= matprops->scale.length;\n xyminmax[2] *= matprops->scale.length;\n xyminmax[3] *= matprops->scale.length;\n }\n\n int inttempout;\n double tempin[17], tempout[17], temp2in[2], temp2out[2];\n\n //find the minimum distance (squared) to the test point\n#ifdef USE_MPI\n MPI_Allreduce(&testpointmindist2, tempout, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);\n#else //USE_MPI\n tempout[0]=testpointmindist2;\n //USE_MPI\n //if this processor isn't the closest to the test point it doesn't count as it's flow reaching the point\n if(tempout[0] < testpointmindist2)\n testpointreach = 0;\n\n //did the closest point to the test point get reached by the flow?\n#ifdef USE_MPI\n MPI_Reduce(&testpointreach, &inttempout, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);\n#else //USE_MPI\n inttempout=testpointreach;\n //USE_MPI\n testpointreach = inttempout;\n\n tempin[0] = xC;\n tempin[1] = yC;\n tempin[2] = rC;\n tempin[3] = m_area;\n tempin[4] = m_v_ave;\n tempin[5] = m_vx_ave;\n tempin[6] = m_vy_ave;\n tempin[7] = 0.0;//m_slope_ave;\n tempin[8] = m_piler2;\n tempin[9] = 0.0;//m_slopevolume;\n tempin[10] = testvolume;\n tempin[11] = m_xVar;\n tempin[12] = m_yVar;\n tempin[13] = ElemTable->get_no_of_entries();\n tempin[14] = m_Vol_;\n tempin[15] = m_Area_;\n tempin[16] = m_Velmean_;\n\n#ifdef USE_MPI\n i = MPI_Reduce(tempin, tempout, 16, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);\n#else //USE_MPI\n for(int i=0;i<17;++i)tempout[i]=tempin[i];\n //USE_MPI\n temp2in[0] = m_max_height;\n temp2in[1] = m_v_max;\n#ifdef USE_MPI\n i = MPI_Reduce(temp2in, temp2out, 2, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);\n#else //USE_MPI\n for(int i=0;i<2;++i)temp2out[i]=temp2in[i];\n //USE_MPI\n\n if(myid == 0)\n {\n if(testpointreach && (timereached < 0.0))\n timereached = timeprops->timesec();\n\n double AREA_SCALE = (matprops->scale.length) * (matprops->scale.length);\n double VOL_SCALE = AREA_SCALE * (matprops->scale.height);\n double VELOCITY_SCALE = sqrt(matprops->scale.length * matprops->scale.gravity);\n //dimensionalize\n xcen = tempout[0] * (matprops->scale.length) / tempout[10];\n ycen = tempout[1] * (matprops->scale.length) / tempout[10];\n xvar = 
tempout[11] * AREA_SCALE / tempout[10] - (xcen) * (xcen);\n yvar = tempout[12] * AREA_SCALE / tempout[10] - (ycen) * (ycen);\n rmean = tempout[2] * (matprops->scale.length) / tempout[10];\n area = tempout[3] * AREA_SCALE;\n vmean = tempout[4] * VELOCITY_SCALE / tempout[10];\n vxmean = tempout[5] * VELOCITY_SCALE / tempout[10];\n vymean = tempout[6] * VELOCITY_SCALE / tempout[10];\n\n //slopemean = (tempout[9] > 0) ? tempout[7] / tempout[9] : 0.0;\n\n realvolume = m_realvolume * VOL_SCALE;\n\n //statvolume is really testvolume which is statvolume if it's not disabled\n statvolume = tempout[10] * VOL_SCALE;\n\n cutoffheight = m_cutoffheight * (matprops->scale.height);//@TODO m_cutoffheight is not init\n testvolume = tempout[10] / m_statvolume;//@TODO m_statvolume is not init\n\n /* the factor of 3^0.5 is a safety factor, this value was chosen because\n * it makes the \"radius\" of a uniformly distributed line equal to half\n * the line length\n */\n //3 standard deviations out ~ 99.5% of the material\n piler = 3.0 * sqrt(xvar + yvar);\n hmax = temp2out[0] * (matprops->scale.height);\n vmax = temp2out[1] * VELOCITY_SCALE;\n\n /* v_star is the nondimensional global average velocity by v_slump\n once v_slump HAS BEEN CALIBRATED (not yet done see ../main/datread.C)\n the calculation will terminate when v_star reaches 1 */\n vstar = vmean / matprops->Vslump;\n\n Vol_ = tempout[14] * VOL_SCALE;\n Area_ = tempout[15] * AREA_SCALE;\n Velmean_ = VELOCITY_SCALE * tempout[16] / tempout[14];\n\n /******************/\n /* output section */\n /******************/\n\n /* output Center Of Mass and x and y components of mean velocity to\n assist the dynamic gis update daemon */\n if(elementType == ElementType::SinglePhase)\n {\n FILE* fp2 = fopen(\"com.up\", \"w\");\n fprintf(fp2, \"%d, %g, %g, %g, %g, %g, %g\\n\", timeprops->iter, timeprops->timesec(), xcen, ycen,\n vxmean, vymean, piler);\n fclose(fp2);\n\n if(_LocalQuants->no_locations > 0 && timeprops->iter % 5 == 4)\n {\n FILE* fp3 = fopen(\"Elements.info\", \"a\");\n fprintf(fp3, \"%.0f, %g\\n\", tempout[13], timeprops->cur_time * timeprops->TIME_SCALE);\n fclose(fp3);\n }\n }\n /* standard to screen output */\n d_time *= timeprops->TIME_SCALE;\n //chunk time\n int hours, minutes;\n double seconds;\n timeprops->chunktime(&hours, &minutes, &seconds);\n\n printf(\"At the end of time step %d the time is %d:%02d:%g (hrs:min:sec),\\n\", timeprops->iter, hours, minutes,\n seconds);\n printf(\"\\ttime step length is %g [sec], volume is %g [m^3],\\n\", d_time, statvolume);\n printf(\"\\tmax height is %g [m], max velocity is %g [m/s],\\n\", hmax, vmax);\n printf(\"\\tave velocity is %g [m/s], v* = %g,\\n\", vmean, vstar);\n printf(\"\\ttotal number of elements %.0f\\n\", tempout[13]);\n printf(\"\\txyminmax %.9e %.9e %.9e %.9e\\n\", xyminmax[0], xyminmax[1], xyminmax[2], xyminmax[3]);\n printf(\"\\n\");\n }\n\n return;\n} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) \\"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/flux_srcs.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": ">size();\n \n\n if(fluxprops->MaxInfluxNow(matprops, timeprops) > 0.0)\n {\n for (ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n if (adapted[ndx] > 0)\n //if this element doesn't belong on this processor don't involve\n ElemTable->ElemProp->calc_flux(ndx, fluxprops, timeprops);\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} 
{"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/flux_srcs.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "p->calc_flux(ndx, fluxprops, timeprops);\n }\n }\n else\n {\n for (ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n if (adapted[ndx] > 0)\n //if this element doesn't belong on this processor don't involve\n elements[ndx].zero_influx();\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/flux_srcs.C", "omp_pragma_line": "#pragma omp parallel for private(entryptr,EmTemp)", "context_chars": 100, "text": "rt(0);\n }\n */\n // mdj 2007-04\n Element *EmTemp;\n ////@ElementsBucketDoubleLoop\n for(int ibuck = 0; ibuck < no_of_buckets; ibuck++)\n {\n for(int ielm = 0; ielm < bucket[ibuck].ndx.size(); ielm++)\n {\n EmTemp = &(elenode_[bucket[ibuck].ndx[ielm]]);\n if(EmTemp->adapted_flag() > 0)\n //if this element doesn't belong on this processor don't involve\n EmTemp->calc_flux(NodeTable, fluxprops, timeprops);\n }\n } #pragma omp parallel for private(entryptr,EmTemp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/flux_srcs.C", "omp_pragma_line": "#pragma omp parallel for private(entryptr,EmTemp)", "context_chars": 100, "text": " }\n }\n else\n {\n\n // mdj 2007-04\n Element *EmTemp;\n ////@ElementsBucketDoubleLoop\n for(int ibuck = 0; ibuck < no_of_buckets; ibuck++)\n {\n for(int ielm = 0; ielm < bucket[ibuck].ndx.size(); ielm++)\n {\n EmTemp = &(elenode_[bucket[ibuck].ndx[ielm]]);\n if(EmTemp->adapted_flag() > 0)\n //if this element doesn't belong on this processor don't involve\n EmTemp->zero_influx();\n }\n } #pragma omp parallel for private(entryptr,EmTemp)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/edge_states.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "ICAL_OMP\n vector &localoutflow=dtmp;\n localoutflow.resize(elements_.size());\n \n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n localoutflow[ndx]=0.0;\n if(adapted_[ndx] > 0)//if this element does not belong on this processor don't involve!!!\n {\n //if this element doesn't belong on this processor don't involve\n double pheight = state_vars_[0][ndx];\n elements_[ndx].calc_edge_states(ElemTable, NodeTable, matprops_ptr, integrator, myid, timeprops_ptr->dtime, order,\n &(localoutflow[ndx]));\n\n double pheight2 = state_vars_[0][ndx];\n if(pheight != pheight2)\n printf(\"prolbem of changing height here,,,.....\\n\");\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/edge_states.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(+:localoutflow_sum)", "context_chars": 100, "text": "nts_.size(); ndx++)\n outflow+=localoutflow[ndx];\n#else\n double localoutflow_sum=0.0;\n\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] > 0)//if this element does not belong on this processor don't involve!!!\n {\n //if this element doesn't belong on this processor don't involve\n double pheight = state_vars_[0][ndx];\n double localoutflow;\n elements_[ndx].calc_edge_states(ElemTable, NodeTable, matprops_ptr, integrator, 
myid, timeprops_ptr->dtime, order,\n &localoutflow);\n localoutflow_sum+=localoutflow;\n double pheight2 = state_vars_[0][ndx];\n if(pheight != pheight2)\n printf(\"prolbem of changing height here,,,.....\\n\");\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(+:localoutflow_sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/get_coef_and_eigen.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)", "context_chars": 100, "text": " dt[2] = c_dmin1(dttemp, dttemp2);\n } //end of section that SHOULD ____NOT___ be openmp'd\n\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if((adapted_[ndx] > 0) || ((adapted_[ndx] < 0) && (ghost_flag == 1)))\n {\n //if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > GEOFLOW_TINY)\n {\n double VxVy[2];\n double evalue;\n\n /* calculate hmax */\n if(hmax < h[ndx])\n hmax = h[ndx];\n\n gmfggetcoef_C(h[ndx], hVx[ndx], hVy[ndx],\n dh_dx[ndx], dhVx_dx[ndx],\n dh_dy[ndx], dhVy_dy[ndx],\n matprops_ptr->bedfrict[material_[ndx]], int_frict,\n kactxy_[0][ndx], kactxy_[1][ndx], tiny, scale_.epsilon);\n\n elements_[ndx].calc_stop_crit(matprops_ptr, this);\n\n if((stoppedflags_[ndx] < 0) || (stoppedflags_[ndx] > 2))\n printf(\"get_coef_and_eigen stopped flag=%d\\n\", stoppedflags_[ndx]);\n\n //must use hVx/h and hVy/h rather than eval_velocity (L'Hopital's\n //rule speed if it is smaller) because underestimating speed (which\n //results in over estimating the timestep) is fatal to stability...\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n //eigen_(EmTemp->eval_state_vars(u_vec_alt),\n eigen_C(h[ndx], eigenvxymax_[0][ndx],eigenvxymax_[1][ndx],\n evalue, tiny, kactxy_[0][ndx], gravity_[2][ndx], VxVy);\n\n // ***********************************************************\n // !!!!!!!!!!!!!!!!!!!!!check dx & dy!!!!!!!!!!!!!!!!!!!!!!!!\n // ***********************************************************\n if(evalue > 1000000000.)\n {\n double maxcurve = (dabs(curvature_[0][ndx]) > dabs(curvature_[1][ndx]) ? 
curvature_[0][ndx] : curvature_[1][ndx]);\n printf(\" eigenvalue is %e for procd %d momentums are %e %e for pile height %e curvature=%e (x,y)=(%e,%e)\\n\",\n evalue, myid, hVx[ndx], hVy[ndx],\n h[ndx], maxcurve, coord_[0][ndx],\n coord_[1][ndx]);\n assert(0);\n }\n\n min_dx_dy_evalue = min( min(dx_[0][ndx], dx_[1][ndx]) / evalue, min_dx_dy_evalue);\n }\n else\n {\n stoppedflags_[ndx]=2;\n }\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/get_coef_and_eigen.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)", "context_chars": 100, "text": " dt[2] = c_dmin1(dttemp, dttemp2);\n } //end of section that SHOULD ____NOT___ be openmp'd\n\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if((adapted_[ndx] > 0) || ((adapted_[ndx] < 0) && (ghost_flag == 1)))\n {\n //if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > GEOFLOW_TINY)\n {\n double VxVy[2];\n double evalue;\n\n /* calculate hmax */\n if(hmax < h[ndx])\n hmax = h[ndx];\n\n gmfggetcoef_VS(kactxy_[0][ndx], kactxy_[1][ndx], scale_.epsilon);\n\n elements_[ndx].calc_stop_crit(matprops_ptr, this);\n\n if((stoppedflags_[ndx] < 0) || (stoppedflags_[ndx] > 2))\n printf(\"get_coef_and_eigen stopped flag=%d\\n\", stoppedflags_[ndx]);\n\n //must use hVx/h and hVy/h rather than eval_velocity (L'Hopital's\n //rule speed if it is smaller) because underestimating speed (which\n //results in over estimating the timestep) is fatal to stability...\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n //eigen_(EmTemp->eval_state_vars(u_vec_alt),\n eigen_VS(h[ndx], eigenvxymax_[0][ndx],eigenvxymax_[1][ndx],\n evalue, tiny, kactxy_[0][ndx], gravity_[2][ndx], VxVy);\n\n // ***********************************************************\n // !!!!!!!!!!!!!!!!!!!!!check dx & dy!!!!!!!!!!!!!!!!!!!!!!!!\n // ***********************************************************\n if(evalue > 1000000000.)\n {\n double maxcurve = (dabs(curvature_[0][ndx]) > dabs(curvature_[1][ndx]) ? 
curvature_[0][ndx] : curvature_[1][ndx]);\n printf(\" eigenvalue is %e for procd %d momentums are %e %e for pile height %e curvature=%e (x,y)=(%e,%e)\\n\",\n evalue, myid, hVx[ndx], hVy[ndx],\n h[ndx], maxcurve, coord_[0][ndx],\n coord_[1][ndx]);\n assert(0);\n }\n\n min_dx_dy_evalue = min( min(dx_[0][ndx], dx_[1][ndx]) / evalue, min_dx_dy_evalue);\n }\n else\n {\n stoppedflags_[ndx]=2;\n }\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/get_coef_and_eigen.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)", "context_chars": 100, "text": " dt[2] = c_dmin1(dttemp, dttemp2);\n } //end of section that SHOULD ____NOT___ be openmp'd\n\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if((adapted_[ndx] > 0) || ((adapted_[ndx] < 0) && (ghost_flag == 1)))\n {\n //if this element does not belong on this processor don't involve!!!\n\n if(h[ndx] > GEOFLOW_TINY)\n {\n double VxVy[2];\n double evalue;\n\n /* calculate hmax */\n if(hmax < h[ndx])\n hmax = h[ndx];\n\n gmfggetcoef_PF(kactxy_[0][ndx], kactxy_[1][ndx], scale_.epsilon);\n\n elements_[ndx].calc_stop_crit(matprops_ptr, this);\n\n if((stoppedflags_[ndx] < 0) || (stoppedflags_[ndx] > 2))\n printf(\"get_coef_and_eigen stopped flag=%d\\n\", stoppedflags_[ndx]);\n\n //must use hVx/h and hVy/h rather than eval_velocity (L'Hopital's\n //rule speed if it is smaller) because underestimating speed (which\n //results in over estimating the timestep) is fatal to stability...\n VxVy[0] = hVx[ndx] / h[ndx];\n VxVy[1] = hVy[ndx] / h[ndx];\n\n //eigen_(EmTemp->eval_state_vars(u_vec_alt),\n eigen_PF(h[ndx], eigenvxymax_[0][ndx],eigenvxymax_[1][ndx],\n evalue, tiny, kactxy_[0][ndx], gravity_[2][ndx], VxVy);\n\n // ***********************************************************\n // !!!!!!!!!!!!!!!!!!!!!check dx & dy!!!!!!!!!!!!!!!!!!!!!!!!\n // ***********************************************************\n if(evalue > 1000000000.)\n {\n double maxcurve = (dabs(curvature_[0][ndx]) > dabs(curvature_[1][ndx]) ? 
curvature_[0][ndx] : curvature_[1][ndx]);\n printf(\" eigenvalue is %e for procd %d momentums are %e %e for pile height %e curvature=%e (x,y)=(%e,%e)\\n\",\n evalue, myid, hVx[ndx], hVy[ndx],\n h[ndx], maxcurve, coord_[0][ndx],\n coord_[1][ndx]);\n assert(0);\n }\n\n min_dx_dy_evalue = min( min(dx_[0][ndx], dx_[1][ndx]) / evalue, min_dx_dy_evalue);\n }\n else\n {\n stoppedflags_[ndx]=2;\n }\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/geoflow/get_coef_and_eigen.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)", "context_chars": 100, "text": "2] = c_dmin1(dttemp, dttemp2);\n } //end of section that SHOULD ____NOT___ be openmp'd\n \n\n\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if((adapted_[ndx] > 0) || ((adapted_[ndx] < 0) && (ghost_flag == 1)))\n {\n //if this element does not belong on this processor don't involve!!!\n \n if(h[ndx] > GEOFLOW_TINY)\n {\n double Vsolid[2], Vfluid[2];\n double evalue;\n /* calculate hmax */\n if(hmax < h[ndx])\n hmax = h[ndx];\n \n\n gmfggetcoef2ph(h_liq[ndx],hVx_sol[ndx],hVy_sol[ndx],\n dh_liq_dx[ndx],dhVx_sol_dx[ndx],\n dh_liq_dy[ndx],dhVy_sol_dy[ndx],\n matprops_ptr->bedfrict[material_[ndx]], int_frict,\n kactxy_[0][ndx], kactxy_[1][ndx], tiny, scale_.epsilon);\n\n elements_[ndx].calc_stop_crit(matprops_ptr, this);\n\n if((stoppedflags_[ndx] < 0) || (stoppedflags_[ndx] > 2))\n printf(\"get_coef_and_eigen stopped flag=%d\\n\", stoppedflags_[ndx]);\n\n //must use hVx/h and hVy/h rather than eval_velocity (L'Hopital's\n //rule speed if it is smaller) because underestimating speed (which\n //results in over estimating the timestep) is fatal to stability...\n Vsolid[0] = hVx_sol[ndx] / h_liq[ndx];\n Vsolid[1] = hVy_sol[ndx] / h_liq[ndx];\n\n Vfluid[0] = hVx_liq[ndx] / h[ndx];\n Vfluid[1] = hVy_liq[ndx] / h[ndx];\n\n //eigen_(EmTemp->eval_state_vars(u_vec_alt),\n eigen2ph(h[ndx], h_liq[ndx], eigenvxymax_[0][ndx],\n eigenvxymax_[1][ndx], evalue, tiny, kactxy_[0][ndx],\n gravity_[2][ndx], Vsolid, Vfluid,\n matprops2_ptr->flow_type);\n\n // ***********************************************************\n // !!!!!!!!!!!!!!!!!!!!!check dx & dy!!!!!!!!!!!!!!!!!!!!!!!!\n // ***********************************************************\n if(evalue > 1000000000.)\n {\n double maxcurve = (dabs(curvature_[0][ndx]) > dabs(curvature_[1][ndx]) ? 
curvature_[0][ndx] : curvature_[1][ndx]);\n fprintf(stderr,\n \"eigenvalue is %e for procd %d momentums are:\\n \\\n solid :(%e, %e) \\n \\\n fluid :(%e, %e) \\n \\\n for pile height %e curvature=%e (x,y)=(%e,%e)\\n\",\n evalue, myid, hVx_sol[ndx],hVy_sol[ndx],\n hVx_liq[ndx],hVy_liq[ndx],\n h[ndx], maxcurve, coord_[0][ndx], coord_[1][ndx]);\n assert(0);\n }\n \n min_dx_dy_evalue = min( min(dx_[0][ndx], dx_[1][ndx]) / evalue, min_dx_dy_evalue);\n }\n else\n {\n elements_[ndx].calc_stop_crit(matprops2_ptr, this); // ensure decent values of kactxy\n }\n \n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK) reduction(min:min_dx_dy_evalue) reduction(max:hmax)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": " NodeHashTable::removeNodes(const ti_ndx_t *nodes_to_delete, const ti_ndx_t Nnodes_to_delete)\n{\n for(int i=0;i=0);\n\n\n SFC_Key keyi=key_[ndx];\n int entry = hash(keyi);\n\n IF_OMP(omp_set_lock(&(bucket_lock[entry])));\n if(status_[ndx]>=0)/*nodes_to_delete might contain duplicates which might be removed while waiting for lock*/\n {\n ASSERT2(ti_ndx_not_negative(lookup_ndx(key_[ndx])));\n int entry_size = bucket[entry].key.size();\n ti_ndx_t bucket_entry_ndx=bucket[entry].lookup_local_ndx(keyi);\n\n if(ti_ndx_not_negative(bucket_entry_ndx))\n {\n //delete\n bucket[entry].key.erase(bucket[entry].key.begin() + bucket_entry_ndx);\n bucket[entry].ndx.erase(bucket[entry].ndx.begin() + bucket_entry_ndx);\n }\n //set status\n status_[ndx]=CS_Removed;\n }\n IF_OMP(omp_unset_lock(&(bucket_lock[entry])));\n } #pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "ragma omp section\n connection_id_.resize(new_size);\n }\n }\n //set values\n for(int i=0;i #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "dx[iElm][which]=ndx;\n new_node_isnew[iElm][which]=true;\n }\n\n //place to hash table\n for(int i=0;i0)\n {\n //this place is already occupied\n //find proper place to insert it\n int j;\n SFC_Key *keyArr = &(bucket[entry].key[0]);\n for(j=0;jkeyArr[j];++j){}\n\n bucket[entry].key.insert(bucket[entry].key.begin() + j, keyi);\n bucket[entry].ndx.insert(bucket[entry].ndx.begin() + j, ndx);\n }\n else\n {\n //will be first member of the bucket entry\n bucket[entry].key.push_back(keyi);\n bucket[entry].ndx.push_back(ndx);\n }\n IF_OMP(omp_unset_lock(&(bucket_lock[entry])));\n } #pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "ize();\n ti_ndx_t new_size=size()+numElemToRefine*4;\n\n resize(new_size);\n\n //set values\n for(int iElm=0;iElm #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "dx(ndx);\n\n new_sons_ndx[iElm][which]=ndx;\n }\n }\n\n //place to hash table\n for(int iElm=0;iElm0)\n {\n //this place is already occupied\n //find proper place to insert it\n int j;\n SFC_Key *keyArr = &(bucket[entry].key[0]);\n for(j=0;jkeyArr[j];++j){}\n\n bucket[entry].key.insert(bucket[entry].key.begin() + j, keyi);\n bucket[entry].ndx.insert(bucket[entry].ndx.begin() + j, ndx);\n }\n else\n {\n //will be first member of the bucket entry\n bucket[entry].key.push_back(keyi);\n bucket[entry].ndx.push_back(ndx);\n }\n IF_OMP(omp_unset_lock(&(bucket_lock[entry])));\n }\n } #pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/hashtab2.C", "omp_pragma_line": "#pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "Table::removeElements(const ti_ndx_t *elements_to_delete, const ti_ndx_t Nelements_to_delete)\n{\n for(int i=0;i=0);\n\n SFC_Key keyi=key_[ndx];\n int entry = hash(keyi);\n\n IF_OMP(omp_set_lock(&(bucket_lock[entry])));\n ASSERT2(ti_ndx_not_negative(lookup_ndx(key_[ndx])));\n int entry_size = bucket[entry].key.size();\n ti_ndx_t bucket_entry_ndx=bucket[entry].lookup_local_ndx(keyi);\n\n if(ti_ndx_not_negative(bucket_entry_ndx))\n {\n //set status\n status_[bucket[entry].ndx[bucket_entry_ndx]]=CS_Removed;\n //delete\n bucket[entry].key.erase(bucket[entry].key.begin() + bucket_entry_ndx);\n bucket[entry].ndx.erase(bucket[entry].ndx.begin() + bucket_entry_ndx);\n }\n IF_OMP(omp_unset_lock(&(bucket_lock[entry])));\n } #pragma omp parallel for schedule(guided,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/datstr/elements_properties.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "ptr)\n{\n assert(ElemTable->all_elenodes_are_permanent);\n double gamma=matprops_ptr->gamma;\n for(ti_ndx_t ndx = 0; ndx < elements_.size(); ndx++)\n {\n if(adapted_[ndx] > 0)//if this element does not belong on this processor don't involve!!!\n {\n get_slopes(ndx,gamma);\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/main/properties.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "ic_pressure_by_elm);\n TI_ASSUME_ALIGNED(m_cum_kinergy_by_elm);\n\n if(numprocs>1)\n {\n for(ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n //update the record of maximum pileheight in the area covered by this element\n double ke = 0.0;\n double dp = 0.0;\n if (h[ndx] > 1.0E-04){\n \tke = 0.5 * (hVx[ndx] * hVx[ndx] + hVy[ndx] * hVy[ndx]) / h[ndx];\n \tdp = ke / h[ndx];\n }\n\n m_cum_kinergy_by_elm[ndx] += ke;\n m_pileheight_by_elm[ndx] = max(m_pileheight_by_elm[ndx], h[ndx]);\n m_max_dynamic_pressure_by_elm[ndx] = max(m_max_dynamic_pressure_by_elm[ndx],dp);\n m_max_kinergy_by_elm[ndx] = max(m_max_kinergy_by_elm[ndx],ke);\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": 
"/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/main/properties.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "max_kinergy_by_elm[ndx] = max(m_max_kinergy_by_elm[ndx],ke);\n }\n }\n else\n {\n for(ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n //update the record of maximum pileheight in the area covered by this element\n double ke = 0.0;\n double dp = 0.0;\n if (h[ndx] > 1.0E-04){\n \tke = 0.5 * (hVx[ndx] * hVx[ndx] + hVy[ndx] * hVy[ndx]) / h[ndx];\n \tdp = ke / h[ndx];\n }\n\n m_cum_kinergy_by_elm[ndx] += ke;\n m_pileheight_by_elm[ndx] = max(m_pileheight_by_elm[ndx], h[ndx]);\n m_max_dynamic_pressure_by_elm[ndx] = max(m_max_dynamic_pressure_by_elm[ndx],dp);\n m_max_kinergy_by_elm[ndx] = max(m_max_kinergy_by_elm[ndx],ke);\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/main/properties.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "I_ASSUME_ALIGNED(m_max_dynamic_pressure_by_elm);\n TI_ASSUME_ALIGNED(m_cum_kinergy_by_elm);\n\n\n for(ti_ndx_t ndx = 0; ndx < N; ndx++)\n {\n if(adapted_[ndx] <= 0)continue;//if this element does not belong on this processor don't involve!!!\n\n //@TODO check ke for two phases\n double ke = 0.0;\n double dp = 0.0;\n if (h[ndx] > 1.0E-04){\n \tke = 0.5 * (hVx_sol[ndx] * hVx_sol[ndx] + hVy_sol[ndx] * hVy_sol[ndx] + hVx_liq[ndx] * hVx_liq[ndx] + hVy_liq[ndx] * hVy_liq[ndx]) / h[ndx];\n \tdp = ke / h[ndx];\n }\n\n m_cum_kinergy_by_elm[ndx] += ke;\n if(h[ndx] > m_pileheight_by_elm[ndx])\n m_pileheight_by_elm[ndx] = h[ndx];\n if(ke > m_max_kinergy_by_elm[ndx])\n m_max_kinergy_by_elm[ndx] = ke;\n if(dp > m_max_dynamic_pressure_by_elm[ndx])\n m_max_dynamic_pressure_by_elm[ndx] = dp;\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(static)", "context_chars": 100, "text": "D_RESTART(HAdapt_refineElements_init,pt_start);\n\n\t//find position of corners, sides and bubbles\n for(int iElm=0;iElmnode_key_ndx_[i][ndx];\n\t\t\tASSERT2(ElemTable->node_key_ndx_[i][ndx]==NodeTable->lookup_ndx(ElemTable->node_key_[i][ndx]));\n\t\t\tASSERT2(ti_ndx_not_negative(node_ndx_ref[iElm][i]));\n\t\t}\n\n //-- bubble\n\t\tnode_ndx_ref[iElm][8] = ElemTable->node_bubble_ndx_[ndx];\n\t\tASSERT2(ElemTable->node_bubble_ndx_[ndx]==NodeTable->lookup_ndx(ElemTable->key_[ndx]));\n ASSERT2(ti_ndx_not_negative(node_ndx_ref[iElm][8]));\n } #pragma omp parallel for schedule(static)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "s();\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_int_nodes_alloc,pt_start);\n\n\t//SIDE 0\n for(int iElm=0;iElmneigh_proc_[0][ndx] == -1)\n\t\t\tboundary = 1;\n\t\telse\n\t\t\tboundary = 0;\n\n\t\tif(boundary == 1 || ElemTable->neigh_gen_[0][ndx] <= ElemTable->generation_[ndx])\n\t\t{\n\t\t //i.e. 
boundary of the computational domain or neighbor generation same or smaller then this one\n\t\t\tRefinedNeigh = 0;\n\t\t\tinfo = S_S_CON;\n\t\t\tif(ElemTable->neigh_proc_[0][ndx] != myid)\n\t\t\t{\n\t\t\t\tother_proc = 1;\n\t\t\t\tinfo = -1;\n\t\t\t}\n\t\t\telse\n\t\t\t other_proc = 0;\n\n\t\t\twhich = 4;\n\t\t\t//---Fourth new node---\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\n\t\t\t//---Fourth old node---\n\t\t\tif(RefinedNeigh || boundary)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[4]]=CORNER;\n\t\t\telse if(other_proc)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[4]]=-1;\n\t\t\telse\n\t\t\t\tNodeTable->info_[ndxNodeTemp[4]]=S_C_CON;\n\n\t\t\t//---Fifth new node---\n\t\t\twhich = 5;\n\t\t\t//n2 = (Node*) NodeTable->lookup(EmTemp->node_key(1));\n\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\t\t}\n\t\telse\n\t\t{\n\t\t //i.e. not boundary of the computational domain and neighbor generation higher then this one\n\t\t //\n\t\t\t//Keith Added this if\n\t\t\tif((ElemTable->neigh_proc_[0][ndx] != myid) || ((ElemTable->neigh_proc_[4][ndx] != myid)\n\t\t\t\t\t&& (ElemTable->neigh_proc_[4][ndx] != -2)))\n\t\t\t\tother_proc = 1;\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\t// fourth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[0][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[0][ndx]));\n\n\t\t\tn1_ndx = NewNodeNdx[4];\n\t\t\tASSERT2(NewNodeNdx[4] == NodeTable->lookup_ndx(NewNodeKey[4]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\t\t\t//fourth old node\n\t\t\tNodeTable->info_[ndxNodeTemp[4]]=CORNER;\n\t\t\tif(other_proc) //ERROR: other_proc is never set, we never checked to see if the more refined neighbor was on another processor\n\t\t\t\tNodeTable->info_[ndxNodeTemp[4]]=-1;\n\n\t\t\t// fifth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[4][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[4][ndx]));\n\n\t\t\tn1_ndx = NewNodeNdx[5];\n\t\t\tASSERT2(NewNodeNdx[5] == NodeTable->lookup_ndx(NewNodeKey[5]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\t\t}\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "}\n }\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_side0_init,pt_start);\n\n //SIDE1\n for(int iElm=0;iElmneigh_proc_[1][ndx] == -1)\n\t\t\tboundary = 1;\n\t\telse\n\t\t\tboundary = 0;\n\n\t\tif(boundary == 1 || ElemTable->neigh_gen_[1][ndx] <= ElemTable->generation_[ndx])\n\t\t{\n\t\t\tRefinedNeigh = 0;\n\t\t\tinfo = S_S_CON;\n\t\t\tif(ElemTable->neigh_gen_[1][ndx] != myid) // && *(EmTemp->get_neigh_proc()+1)>0)\n\t\t\t{\n\t\t\t\tother_proc = 1;\n\t\t\t\tinfo = -1;\n\t\t\t}\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\t//---Eight new node---\n\t\t\twhich = 8;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\n\t\t\t//---Fifth old node---\n\t\t\tif(RefinedNeigh || boundary)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[5]]=CORNER;\n\t\t\telse 
if(other_proc)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[5]]=info;\n\t\t\telse\n\t\t\t\tNodeTable->info_[ndxNodeTemp[5]]=S_C_CON;\n\n\t\t\t//---Thirteenth new node---\n\t\t\twhich = 13;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\t\t\t//check_create_new_node(which, 2, 5, ndxNodeTemp, NewNodeKey, NewNodeNdx, info, RefinedNeigh, boundary);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t//Keith Added this if\n\t\t\tif((ElemTable->neigh_proc_[1][ndx] != myid) || ((ElemTable->neigh_proc_[5][ndx] != myid)\n\t\t\t\t\t&& (ElemTable->neigh_proc_[5][ndx] != -2)))\n\t\t\t\tother_proc = 1;\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\t// eighth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[1][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[1][ndx]));\n\n\t\t\tn1_ndx =NewNodeNdx[8];\n\t\t\tASSERT2(NewNodeNdx[8] == NodeTable->lookup_ndx(NewNodeKey[8]));\n\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\n\t\t\t// fifth old node\n\t\t\tNodeTable->info_[ndxNodeTemp[5]]=CORNER;\n\t\t\tif(other_proc) //ERROR: other_proc is never set, we never checked to see if the more refined neighbor was on another processor\n\t\t\t\tNodeTable->info_[ndxNodeTemp[5]]=-1;\n\n\t\t\t// thirteenth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[5][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[5][ndx]));\n\n\t\t\tn1_ndx = NewNodeNdx[13];\n\t\t\tASSERT2(NewNodeNdx[13] == NodeTable->lookup_ndx(NewNodeKey[13]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\t\t}\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "}\n }\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_side1_init,pt_start);\n\n //SIDE2\n for(int iElm=0;iElmneigh_proc_[2][ndx] == -1)\n\t\t\tboundary = 1;\n\t\telse\n\t\t\tboundary = 0;\n\n\t\tif(boundary == 1 || ElemTable->neigh_gen_[2][ndx] <= ElemTable->generation_[ndx])\n\t\t{\n\t\t\tinfo = S_S_CON;\n\n\t\t\tif(ElemTable->neigh_gen_[2][ndx] != myid) // && *(EmTemp->get_neigh_proc()+2)>0)\n\t\t\t{\n\t\t\t\tother_proc = 1;\n\t\t\t\tinfo = -1;\n\t\t\t}\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\tRefinedNeigh = 0;\n\n\t\t\t//---Fourteenth new node---\n\t\t\twhich = 14;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\n\t\t\t//---Sixth old node---\n\t\t\tif(RefinedNeigh || boundary)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[6]]=CORNER;\n\t\t\telse if(other_proc)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[6]]=info;\n\t\t\telse\n\t\t\t\tNodeTable->info_[ndxNodeTemp[6]]=S_C_CON;\n\n\t\t\t//---Fifteenth new node---\n\t\t\twhich = 15;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t//Keith Added this if\n\t\t\tif((ElemTable->neigh_proc_[2][ndx] != myid) || ((ElemTable->neigh_proc_[6][ndx] != myid)\n\t\t\t\t\t&& (ElemTable->neigh_proc_[6][ndx] != -2)))\n\t\t\t\tother_proc = 1;\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\t// fourteenth new node\n\t\t\tneigh_elm_ndx = 
ElemTable->neighbor_ndx_[6][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[6][ndx]));\n\n\t\t\tn1_ndx = NewNodeNdx[14];\n\t\t\tASSERT2(NewNodeNdx[14] == NodeTable->lookup_ndx(NewNodeKey[14]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\n\t\t\t// sixth old node\n\t\t\tNodeTable->info_[ndxNodeTemp[6]]=CORNER;\n\t\t\tif(other_proc) //ERROR: other_proc is never set, we never checked to see if the more refined neighbor was on another processor\n\t\t\t\tNodeTable->info_[ndxNodeTemp[6]]=-1;\n\n\t\t\t// fifteenth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[2][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[2][ndx]));\n\n n1_ndx = NewNodeNdx[15];\n ASSERT2(NewNodeNdx[15] == NodeTable->lookup_ndx(NewNodeKey[15]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\t\t}\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "\n }\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_side2_init,pt_start);\n\n //SIDE 3\n for(int iElm=0;iElmneigh_proc_[3][ndx] == -1)\n\t\t\tboundary = 1;\n\t\telse\n\t\t\tboundary = 0;\n\n\t\tif(boundary == 1 || ElemTable->neigh_gen_[3][ndx] <= ElemTable->generation_[ndx])\n\t\t{\n\t\t\tinfo = S_S_CON;\n\n\t\t\tif(ElemTable->neigh_gen_[3][ndx] != myid) //&& *(EmTemp->get_neigh_proc()+3)>0)\n\t\t\t{\n\t\t\t\tother_proc = 1;\n\t\t\t\tinfo = -1;\n\t\t\t}\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\tRefinedNeigh = 0;\n\n\t\t\t//---Sixth new node----\n\t\t\twhich = 6;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\t\t\t//check_create_new_node(which, 0, 7, ndxNodeTemp, NewNodeKey, NewNodeNdx, info, RefinedNeigh, boundary);\n\n\t\t\t//---Seventh old node---\n\t\t\tif(RefinedNeigh || boundary)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[7]]=CORNER;\n\t\t\telse if(other_proc)\n\t\t\t\tNodeTable->info_[ndxNodeTemp[7]]=-1;\n\t\t\telse\n\t\t\t\tNodeTable->info_[ndxNodeTemp[7]]=S_C_CON;\n\n\t\t\t//---Eleventh new node---\n\t\t\twhich = 11;\n\t\t\tcheck_create_new_node2(iElm, which, info, RefinedNeigh, boundary);\n\t\t\t//n1 = (Node*) NodeTable->lookup(EmTemp->node_key(7));\n\t\t\t//n2 = (Node*) NodeTable->lookup(EmTemp->node_key(3));\n\n\t\t\t//check_create_new_node(which, 3, 7, ndxNodeTemp, NewNodeKey, NewNodeNdx, info, RefinedNeigh, boundary);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t//Keith Added this if\n\t\t\tif((ElemTable->neigh_proc_[3][ndx] != myid) || ((ElemTable->neigh_proc_[7][ndx] != myid)\n\t\t\t\t\t&& (ElemTable->neigh_proc_[7][ndx] != -2)))\n\t\t\t\tother_proc = 1;\n\t\t\telse\n\t\t\t\tother_proc = 0;\n\n\t\t\t// sixth new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[7][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[7][ndx]));\n\n\t\t\tn1_ndx = NewNodeNdx[6];\n\t\t\tASSERT2(NewNodeNdx[6] == NodeTable->lookup_ndx(NewNodeKey[6]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == 
GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\n\t\t\t// seventh old node\n\t\t\tNodeTable->info_[ndxNodeTemp[7]]=CORNER;\n\t\t\tif(other_proc) //ERROR: other_proc is never set, we never checked to see if the more refined neighbor was on another processor\n\t\t\t\tNodeTable->info_[ndxNodeTemp[7]]=-1;\n\t\t\t// eleventh new node\n\t\t\tneigh_elm_ndx = ElemTable->neighbor_ndx_[3][ndx];\n\t\t\tASSERT2(neigh_elm_ndx == ElemTable->lookup_ndx(ElemTable->neighbors_[3][ndx]));\n\n n1_ndx = NewNodeNdx[11];\n ASSERT2(NewNodeNdx[11] == NodeTable->lookup_ndx(NewNodeKey[11]));\n\t\t\tif(ElemTable->refined_[neigh_elm_ndx] == 0 || ElemTable->refined_[neigh_elm_ndx] == GHOST)\n\t\t\t\tNodeTable->info_[n1_ndx]=SIDE;\n\t\t\telse\n\t\t\t\tNodeTable->info_[n1_ndx]=S_C_CON;\n\t\t}\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "=S_C_CON;\n\t\t}\n }\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_side3_init,pt_start);\n\n for(int iElm=0;iElminfo_[ElemTable->node_bubble_ndx_[iElm]]=CORNER;\n\n\t\t//++++++++++++++++INTERNAL SIDE NODES 7, 8OLD, 12, 9, 10\n\t\t//---Seventh new node---\n\t\twhich=7;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), SIDE, -3, matprops_ptr);\n\n\t\t//---Twelwth new node---\n\t\twhich=12;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), SIDE, -3, matprops_ptr);\n\n\t\t//---Ninth new node---\n\t\twhich=9;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), SIDE, -3, matprops_ptr);\n\n\t\t//---Tenth new node---\n\t\twhich=10;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), SIDE, -3, matprops_ptr);\n\n\t\t//+++++++++++++++++++THE NEW BUBBLES 0, 1, 2, 3\n\t\t//---0th new node---\n\t\twhich=0;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), BUBBLE, -3, matprops_ptr);\n\n\t\t//---1st new node---\n\t\twhich=1;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), BUBBLE, -3, matprops_ptr);\n\n\t\t//---2nd new node---\n\t\twhich=2;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), BUBBLE, -3, matprops_ptr);\n\n\t\t//---3rd new node---\n\t\twhich=3;\n\t\tNodeTable->elenode_[new_node_ndx[iElm][which]].init(new_node_key[iElm][which], &(new_node_coord[iElm][which][0]), BUBBLE, -3, matprops_ptr);\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/refine2.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": ",new_node_isnew);\n\n PROFILING3_STOPADD_RESTART(HAdapt_refineElements_new_elm_aloc,pt_start);\n for(int iElm=0;iElmgeneration_[ndx] + 1;\n\t\tint neigh_gen[4];\n\t\tint material = ElemTable->material_[ndx];\n\n\t\tdouble coord[DIMENSION];\n\t\t//---0th new element---\n\n ti_ndx_t ndxQuad9P;\n\n\t\t//the nodes\n\n\t\tnodes[0] = 
ElemTable->node_key_[0][ndx];\n\t\tnodes[1] = ElemTable->node_key_[4][ndx];\n\t\tnodes[2] = ElemTable->key_[ndx];\n\t\tnodes[3] = ElemTable->node_key_[7][ndx];\n\t\tnodes[4] = NewNodeKey[4];\n\t\tnodes[5] = NewNodeKey[7];\n\t\tnodes[6] = NewNodeKey[9];\n\t\tnodes[7] = NewNodeKey[6];\n\t\tnodes[8] = NewNodeKey[0];\n\n\t\tnodes_ndx[0] = ElemTable->node_key_ndx_[0][ndx];\n\t\tnodes_ndx[1] = ElemTable->node_key_ndx_[4][ndx];\n\t\tnodes_ndx[2] = ElemTable->node_bubble_ndx_[ndx];\n\t\tnodes_ndx[3] = ElemTable->node_key_ndx_[7][ndx];\n nodes_ndx[4] = NewNodeNdx[4];\n nodes_ndx[5] = NewNodeNdx[7];\n nodes_ndx[6] = NewNodeNdx[9];\n nodes_ndx[7] = NewNodeNdx[6];\n nodes_ndx[8] = NewNodeNdx[0];\n\n\t\tn1_ndx = nodes_ndx[8];\n\t\tfor(i = 0; i < DIMENSION; i++)\n\t\t\tcoord[i] = NodeTable->coord_[i][n1_ndx];\n\t\t//neighbors\n\t\tneigh[0] = neigh[4] = ElemTable->neighbors_[0][ndx]; //Why is this ok if not ok for 3 down\n\t\tneigh[1] = neigh[5] = NewNodeKey[1];\n\t\tneigh[2] = neigh[6] = NewNodeKey[3];\n\t\tif(ElemTable->neigh_proc_[7][ndx]!= -2)\n\t\t\tneigh[3] = neigh[7] = ElemTable->neighbors_[7][ndx]; //This should be okay no matter what\n\t\telse\n\t\t\tneigh[3] = neigh[7] = ElemTable->neighbors_[3][ndx]; //This is only ok if neigh_proc==-2\n\n\t\tneigh_ndx[0] = neigh_ndx[4] = ElemTable->neighbor_ndx_[0][ndx]; //Why is this ok if not ok for 3 down\n\t\tneigh_ndx[1] = neigh_ndx[5] = ndxSons[1];\n\t\tneigh_ndx[2] = neigh_ndx[6] = ndxSons[3];\n if(ElemTable->neigh_proc_[7][ndx]!= -2)\n neigh_ndx[3] = neigh_ndx[7] = ElemTable->neighbor_ndx_[7][ndx]; //This should be okay no matter what\n else\n neigh_ndx[3] = neigh_ndx[7] = ElemTable->neighbor_ndx_[3][ndx]; //This is only ok if neigh_proc==-2\n\n\t\t//process of the neighbors\n\n\t\tneigh_proc[0] = ElemTable->neigh_proc_[0][ndx];\n\t\tneigh_proc[1] = myid;\n\t\tneigh_proc[2] = myid;\n\t\tif(ElemTable->neigh_proc_[7][ndx] != -2)\n\t\t\tneigh_proc[3] = ElemTable->neigh_proc_[7][ndx]; //depending if the neighboring element is already refined\n\t\telse\n\t\t\tneigh_proc[3] = ElemTable->neigh_proc_[3][ndx];\n\n\t\tneigh_proc[4] = neigh_proc[5] = neigh_proc[6] = neigh_proc[7] = -2;\n\n\t\tneigh_gen[0] = ElemTable->neigh_gen_[0][ndx];\n\t\tneigh_gen[1] = generation;\n\t\tneigh_gen[2] = generation;\n\t\tneigh_gen[3] = ElemTable->neigh_gen_[3][ndx];\n\n\n\t\tdouble err = ElemTable->el_error_[0][ndx] * .5; //added by jp oct11\n\t\tdouble sol = ElemTable->el_solution_[0][ndx] * .5; //added by jp oct11\n\t\t// son 0 can use elm_loc\n\t\tint iwetnodefather = ElemTable->iwetnode_[ndx];\n\t\tdouble Awetfather = ElemTable->Awet_[ndx];\n\t\tdouble dpson[2];\n\t\tdpson[0] = ElemTable->drypoint_[0][ndx] * 2 + 0.5;\n\t\tdpson[1] = ElemTable->drypoint_[1][ndx] * 2 + 0.5;\n\n int elm_loc[2], my_elm_loc[2];\n elm_loc[0] = 2 * ElemTable->elm_loc_[0][ndx];\n elm_loc[1] = 2 * ElemTable->elm_loc_[1][ndx];\n\n\t\t//init new element\n\t\tndxQuad9P = ndxSons[0];\n\t\tElemTable->elenode_[ndxQuad9P].init(nodes, nodes_ndx, neigh, neigh_ndx, neigh_proc, generation, elm_loc, NULL, neigh_gen, material,\n\t\t\t\t\t\t\t ndx, coord, ElemTable, NodeTable, myid, matprops_ptr, iwetnodefather, Awetfather,\n\t\t\t\t\t\t\t dpson);\n\t\tElemTable->which_son_[ndxQuad9P]=0; //--by jp, 0 means son 0\n\t\tElemTable->elenode_[ndxQuad9P].putel_sq(sol, err); //added by jp oct11\n\n\n\t\t//---1st new element---\n\n\t\t//the nodes\n\n\t\tnodes[0] = ElemTable->node_key_[4][ndx];\n\t\tnodes[1] = ElemTable->node_key_[1][ndx];\n\t\tnodes[2] = ElemTable->node_key_[5][ndx];\n\t\tnodes[3] = 
ElemTable->key_[ndx];\n\t\tnodes[4] = NewNodeKey[5];\n\t\tnodes[5] = NewNodeKey[8];\n\t\tnodes[6] = NewNodeKey[10];\n\t\tnodes[7] = NewNodeKey[7];\n\t\tnodes[8] = NewNodeKey[1];\n\n\t\tnodes_ndx[0] = ElemTable->node_key_ndx_[4][ndx];\n\t\tnodes_ndx[1] = ElemTable->node_key_ndx_[1][ndx];\n\t\tnodes_ndx[2] = ElemTable->node_key_ndx_[5][ndx];\n\t\tnodes_ndx[3] = ElemTable->node_bubble_ndx_[ndx];\n\t\tnodes_ndx[4] = NewNodeNdx[5];\n\t\tnodes_ndx[5] = NewNodeNdx[8];\n\t\tnodes_ndx[6] = NewNodeNdx[10];\n\t\tnodes_ndx[7] = NewNodeNdx[7];\n\t\tnodes_ndx[8] = NewNodeNdx[1];\n\n\t\tn1_ndx = nodes_ndx[8];\n\t\tfor(i = 0; i < DIMENSION; i++)\n\t\t\tcoord[i] = NodeTable->coord_[i][n1_ndx];\n\n\t\t//neighbors\n\t\tif(ElemTable->neigh_proc_[4][ndx] != -2)\n\t\t\tneigh[0] = neigh[4] = ElemTable->neighbors_[4][ndx]; //this should be ok now matter what\n\t\telse\n\t\t\tneigh[0] = neigh[4] = ElemTable->neighbors_[0][ndx]; //this is only ok if neigh_proc==-2\n\t\tneigh[1] = neigh[5] = ElemTable->neighbors_[1][ndx];\n\t\tneigh[2] = neigh[6] = NewNodeKey[2];\n\t\tneigh[3] = neigh[7] = NewNodeKey[0];\n\n\t\tif(ElemTable->neigh_proc_[4][ndx] != -2)\n\t\t neigh_ndx[0] = neigh_ndx[4] = ElemTable->neighbor_ndx_[4][ndx]; //this should be ok now matter what\n else\n neigh_ndx[0] = neigh_ndx[4] = ElemTable->neighbor_ndx_[0][ndx]; //this is only ok if neigh_proc==-2\n\t\tneigh_ndx[1] = neigh_ndx[5] = ElemTable->neighbor_ndx_[1][ndx];\n\t\tneigh_ndx[2] = neigh_ndx[6] = ndxSons[2];\n neigh_ndx[3] = neigh_ndx[7] = ndxSons[0];\n\n\t\t//process of the neighbors\n\n\t\tneigh_proc[0] = (ElemTable->neigh_proc_[4][ndx] != -2) ? ElemTable->neigh_proc_[4][ndx] : ElemTable->neigh_proc_[0][ndx];\n\t\tneigh_proc[1] = ElemTable->neigh_proc_[1][ndx];\n\t\tneigh_proc[2] = myid;\n\t\tneigh_proc[3] = myid;\n\n\t\tneigh_proc[4] = neigh_proc[5] = neigh_proc[6] = neigh_proc[7] = -2;\n\n\t\tneigh_gen[0] = ElemTable->neigh_gen_[0][ndx];\n\t\tneigh_gen[1] = ElemTable->neigh_gen_[1][ndx];\n\t\tneigh_gen[2] = generation;\n\t\tneigh_gen[3] = generation;\n\n\t\tmy_elm_loc[0] = elm_loc[0] + 1;\n\t\tmy_elm_loc[1] = elm_loc[1];\n\t\tdpson[0] = ElemTable->drypoint_[0][ndx] * 2 - 0.5;\n\t\tdpson[1] = ElemTable->drypoint_[1][ndx] * 2 + 0.5;\n\n //init new element\n ndxQuad9P = ndxSons[1];\n ElemTable->elenode_[ndxQuad9P].init(nodes, nodes_ndx, neigh, neigh_ndx, neigh_proc, generation, my_elm_loc, NULL, neigh_gen, material,\n\t\t\t\t\t\t\t ndx, coord, ElemTable, NodeTable, myid, matprops_ptr, iwetnodefather, Awetfather,\n\t\t\t\t\t\t\t dpson);\n\t\tElemTable->which_son_[ndxQuad9P]=1; //--by jp\n\t\tElemTable->elenode_[ndxQuad9P].putel_sq(sol, err); //added by jp oct11\n\n\n\t\t//---2nd new element---\n\n\t\t//the nodes\n\t\tnodes[0] = ElemTable->key_[ndx];\n\t\tnodes[1] = ElemTable->node_key_[5][ndx];\n\t\tnodes[2] = ElemTable->node_key_[2][ndx];\n\t\tnodes[3] = ElemTable->node_key_[6][ndx];\n\t\tnodes[4] = NewNodeKey[10];\n\t\tnodes[5] = NewNodeKey[13];\n\t\tnodes[6] = NewNodeKey[15];\n\t\tnodes[7] = NewNodeKey[12];\n\t\tnodes[8] = NewNodeKey[2];\n\n\t\tnodes_ndx[0] = ElemTable->node_bubble_ndx_[ndx];\n nodes_ndx[1] = ElemTable->node_key_ndx_[5][ndx];\n nodes_ndx[2] = ElemTable->node_key_ndx_[2][ndx];\n nodes_ndx[3] = ElemTable->node_key_ndx_[6][ndx];\n nodes_ndx[4] = NewNodeNdx[10];\n nodes_ndx[5] = NewNodeNdx[13];\n nodes_ndx[6] = NewNodeNdx[15];\n nodes_ndx[7] = NewNodeNdx[12];\n nodes_ndx[8] = NewNodeNdx[2];\n\n\t\tn1_ndx = nodes_ndx[8];\n\t\tfor(i = 0; i < DIMENSION; i++)\n\t\t\tcoord[i] = 
NodeTable->coord_[i][n1_ndx];\n\n\t\t//neighbors\n\t\tneigh[0] = neigh[4] = NewNodeKey[1];\n\t\tif(ElemTable->neigh_proc_[5][ndx] != -2)\n\t\t\tneigh[1] = neigh[5] = ElemTable->neighbors_[5][ndx]; //This should be ok no matter what\n\t\telse\n\t\t\tneigh[1] = neigh[5] = ElemTable->neighbors_[1][ndx]; //this is only ok is neigh_proc==-2\n\t\tneigh[2] = neigh[6] = ElemTable->neighbors_[2][ndx];\n\t\tneigh[3] = neigh[7] = NewNodeKey[3];\n\n\t\tneigh_ndx[0] = neigh_ndx[4] = ndxSons[1];\n if(ElemTable->neigh_proc_[5][ndx] != -2)\n neigh_ndx[1] = neigh_ndx[5] = ElemTable->neighbor_ndx_[5][ndx]; //This should be ok no matter what\n else\n neigh_ndx[1] = neigh_ndx[5] = ElemTable->neighbor_ndx_[1][ndx]; //this is only ok is neigh_proc==-2\n neigh_ndx[2] = neigh_ndx[6] = ElemTable->neighbor_ndx_[2][ndx];\n neigh_ndx[3] = neigh_ndx[7] = ndxSons[3];\n\n\n\t\t//process of the neighbors\n\n\t\tneigh_proc[0] = myid;\n\t\tneigh_proc[1] = (ElemTable->neigh_proc_[5][ndx] != -2) ? ElemTable->neigh_proc_[5][ndx] : ElemTable->neigh_proc_[1][ndx];\n\t\tneigh_proc[2] = ElemTable->neigh_proc_[2][ndx];\n\t\tneigh_proc[3] = myid;\n\n\t\tneigh_proc[4] = neigh_proc[5] = neigh_proc[6] = neigh_proc[7] = -2;\n\n\t\tneigh_gen[0] = generation;\n\t\tneigh_gen[1] = ElemTable->neigh_gen_[1][ndx];\n\t\tneigh_gen[2] = ElemTable->neigh_gen_[2][ndx];\n\t\tneigh_gen[3] = generation;\n\n\t\tmy_elm_loc[0] = elm_loc[0] + 1;\n\t\tmy_elm_loc[1] = elm_loc[1] + 1;\n\t\tdpson[0] = ElemTable->drypoint_[0][ndx] * 2 - 0.5;\n\t\tdpson[1] = ElemTable->drypoint_[1][ndx] * 2 - 0.5;\n\n //init new element\n ndxQuad9P = ndxSons[2];\n ElemTable->elenode_[ndxQuad9P].init(nodes, nodes_ndx, neigh, neigh_ndx, neigh_proc, generation, my_elm_loc, NULL, neigh_gen, material,\n\t\t\t\t\t\t\t ndx, coord, ElemTable, NodeTable, myid, matprops_ptr, iwetnodefather, Awetfather,\n\t\t\t\t\t\t\t dpson);\n\t\tElemTable->which_son_[ndxQuad9P]=2; //--by jp\n\t\tElemTable->elenode_[ndxQuad9P].putel_sq(sol, err); //added by jp oct11\n\n\n\n\t\t//---3rd new element---\n\n\t\t//the nodes\n\t\tnodes[0] = ElemTable->node_key_[7][ndx];\n\t\tnodes[1] = ElemTable->key_[ndx];\n\t\tnodes[2] = ElemTable->node_key_[6][ndx];\n\t\tnodes[3] = ElemTable->node_key_[3][ndx];\n\t\tnodes[4] = NewNodeKey[9];\n\t\tnodes[5] = NewNodeKey[12];\n\t\tnodes[6] = NewNodeKey[14];\n\t\tnodes[7] = NewNodeKey[11];\n\t\tnodes[8] = NewNodeKey[3];\n\n\t\tnodes_ndx[0] = ElemTable->node_key_ndx_[7][ndx];\n nodes_ndx[1] = ElemTable->node_bubble_ndx_[ndx];\n nodes_ndx[2] = ElemTable->node_key_ndx_[6][ndx];\n nodes_ndx[3] = ElemTable->node_key_ndx_[3][ndx];\n nodes_ndx[4] = NewNodeNdx[9];\n nodes_ndx[5] = NewNodeNdx[12];\n nodes_ndx[6] = NewNodeNdx[14];\n nodes_ndx[7] = NewNodeNdx[11];\n nodes_ndx[8] = NewNodeNdx[3];\n\n\t\tn1_ndx = nodes_ndx[8];\n\t\tfor(i = 0; i < DIMENSION; i++)\n\t\t\tcoord[i] = NodeTable->coord_[i][n1_ndx];\n\n\t\t//neighbors\n\t\tneigh[0] = neigh[4] = NewNodeKey[0];\n\t\tneigh[1] = neigh[5] = NewNodeKey[2];\n\t\tif(ElemTable->neigh_proc_[6][ndx] != -2)\n\t\t\tneigh[2] = neigh[6] = ElemTable->neighbors_[6][ndx];\n\t\telse\n\t\t\tneigh[2] = neigh[6] = ElemTable->neighbors_[2][ndx];\n\t\tneigh[3] = neigh[7] = ElemTable->neighbors_[3][ndx];\n\n\t\tneigh_ndx[0] = neigh_ndx[4] = ndxSons[0];\n\t\tneigh_ndx[1] = neigh_ndx[5] = ndxSons[2];\n if(ElemTable->neigh_proc_[6][ndx] != -2)\n neigh_ndx[2] = neigh_ndx[6] = ElemTable->neighbor_ndx_[6][ndx];\n else\n neigh_ndx[2] = neigh_ndx[6] = ElemTable->neighbor_ndx_[2][ndx];\n neigh_ndx[3] = neigh_ndx[7] = 
ElemTable->neighbor_ndx_[3][ndx];\n\n\n\t\t//process of the neighbors\n\n\t\tneigh_proc[0] = myid;\n\t\tneigh_proc[1] = myid;\n\t\tneigh_proc[2] = (ElemTable->neigh_proc_[6][ndx] != -2) ? ElemTable->neigh_proc_[6][ndx] : ElemTable->neigh_proc_[2][ndx];\n\t\tneigh_proc[3] = ElemTable->neigh_proc_[3][ndx];\n\n\t\tneigh_proc[4] = neigh_proc[5] = neigh_proc[6] = neigh_proc[7] = -2;\n\n\t\tneigh_gen[0] = generation;\n\t\tneigh_gen[1] = generation;\n\t\tneigh_gen[2] = ElemTable->neigh_gen_[2][ndx];\n\t\tneigh_gen[3] = ElemTable->neigh_gen_[3][ndx];\n\n\t\tmy_elm_loc[0] = elm_loc[0];\n\t\tmy_elm_loc[1] = elm_loc[1] + 1;\n\t\tdpson[0] = ElemTable->drypoint_[0][ndx] * 2 + 0.5;\n\t\tdpson[1] = ElemTable->drypoint_[1][ndx] * 2 - 0.5;\n\n //init new element\n ndxQuad9P = ndxSons[3];\n ElemTable->elenode_[ndxQuad9P].init(nodes, nodes_ndx, neigh, neigh_ndx, neigh_proc, generation, my_elm_loc, NULL, neigh_gen, material,\n\t\t\t\t\t\t\t ndx, coord, ElemTable, NodeTable, myid, matprops_ptr, iwetnodefather, Awetfather,\n\t\t\t\t\t\t\t dpson);\n\t\tElemTable->which_son_[ndxQuad9P]=3; //--by jp\n\t\tElemTable->elenode_[ndxQuad9P].putel_sq(sol, err); //added by jp oct11\n\n\n\n\t\t//---CHANGING THE FATHER---\n\t\tfor(i = 0; i < 4; i++)\n\t\t{\n\t\t ElemTable->son_[i][ndx]=NewNodeKey[i];\n\t\t ElemTable->son_ndx_[i][ndx]=ndxSons[i];\n\t\t}\n\t\t// putting in brother info\n\t\tfor(i = 0; i < 4; i++)\n\t\t{\n\t\t\tElemTable->elenode_[ndxSons[i]].set_brothers(NewNodeKey);\n\t\t\tfor(int j = 0; j < 4; j++)\n\t\t\t{\n\t\t\t ElemTable->brothers_ndx_[j][ndxSons[i]]=ndxSons[j];\n\t\t\t}\n\t\t}\n\n adapted[ndx]=OLDFATHER;\n refined[ndx]=1;\n\t} #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/hadpt.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "n the innermost buffer layer as the BUFFER layer\n //@ElementsSingleLoopNoStatusCheck\n for(ti_ndx_t ndx=0;ndxsize();++ndx)\n {\n if(status[ndx] >=0 &&\n ((ElemProp->if_first_buffer_boundary(ndx, GEOFLOW_TINY) > 0)\n\t\t\t\t || (ElemProp->if_first_buffer_boundary(ndx, REFINE_THRESHOLD1) > 0)\n\t\t\t\t || (ElemProp->if_first_buffer_boundary(ndx, REFINE_THRESHOLD2) > 0)\n\t\t\t\t || (ElemProp->if_first_buffer_boundary(ndx, REFINE_THRESHOLD) > 0)))\n {\n \t\tadapted[ndx]=BUFFER;\n }\n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/hadpt.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": " \n PROFILING3_STOPADD_RESTART(HAdapt_adapt_htflush2,pt_start);\n\n TIMING3_START(t_start3);\n for(ti_ndx_t ndx=0;ndxsize();++ndx)\n {\n if(status[ndx]>=0)\n {\n EmTemp = &(elements[ndx]);\n \n switch (adapted[ndx])\n {\n case NEWBUFFER:\n printf(\"Suspicious element has adapted flag=%d\\n aborting\", adapted[ndx]);\n assert(0);\n break;\n case BUFFER:\n case NEWSON:\n case NEWFATHER:\n case NOTRECADAPTED:\n //it's an active (non ghost) element\n ElemProp->calc_d_gravity(ndx);\n ElemProp->calc_wet_dry_orient(ndx);\n break;\n case TOBEDELETED:\n //there should be no more elements to delete at this point\n printf(\"Should be already deleted aborting\", adapted[ndx]);\n assert(0);\n break;\n case -NOTRECADAPTED:\n case -NEWFATHER:\n case -NEWSON:\n case -BUFFER:\n //it's a ghost element, keep these so I don't have to move data 
again.\n break;\n case OLDFATHER:\n case OLDSON:\n printf(\"Suspicious element has adapted flag=%d\\n aborting\", adapted[ndx]);\n assert(0);\n break;\n default:\n //I don't know what kind of Element this is.\n cout<<\"FUBAR element type in H_adapt()!!! key={\"<key_[ndx]<<\"} adapted=\"< #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/TITAN2D/titan2d/src/adapt/updatenei.C", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)", "context_chars": 100, "text": "on from other processors */\n /*************************************************************/\n for(int iElm=0;iElmelenode_[ifather]); //Hello I'm the OLDFATHER\n ASSERT2(ElemTable->adapted_[ifather]==OLDFATHER); //sanity check\n \n NdTempNdx = ElemTable->node_bubble_ndx_[ifather];\n ASSERT2(NdTempNdx==NodeTable->lookup_ndx(ElemTable->key_[ifather]));\n NodeTable->info_[NdTempNdx]=CORNER;\n \n //These are my sons, I'm going to introduce them to my neighbors\n for(isonA = 0; isonA < 4; isonA++)\n {\n EmSonNdx[isonA] = ElemTable->son_ndx_[isonA][ifather];\n EmSon[isonA]=&(ElemTable->elenode_[EmSonNdx[isonA]]);\n ASSERT2(EmSonNdx[isonA] == ElemTable->lookup_ndx(ElemTable->son_[isonA][ifather]));\n\n if(ElemTable->node_bubble_ndx_[EmSonNdx[isonA]]!=NodeTable->lookup_ndx(ElemTable->key_[EmSonNdx[isonA]]))\n {\n printf(\"%d %d\\n\",ElemTable->node_bubble_ndx_[EmSonNdx[isonA]],NodeTable->lookup_ndx(ElemTable->key_[EmSonNdx[isonA]]));\n }\n\n ASSERT2(ElemTable->node_bubble_ndx_[EmSonNdx[isonA]]==NodeTable->lookup_ndx(ElemTable->key_[EmSonNdx[isonA]]));\n NodeTable->info_[ElemTable->node_bubble_ndx_[EmSonNdx[isonA]]]=BUBBLE;\n \n\n ASSERT2(ElemTable->node_key_ndx_[(isonA + 1) % 4 + 4][EmSonNdx[isonA]] ==\\\n NodeTable->lookup_ndx(ElemTable->node_key_[(isonA + 1) % 4 + 4][EmSonNdx[isonA]]));\n NodeTable->info_[ElemTable->node_key_ndx_[(isonA + 1) % 4 + 4][EmSonNdx[isonA]]]=SIDE;\n }\n \n //visit my neighbors on each side\n for(iside = 0; iside < 4; iside++)\n {\n \n ineigh = iside;\n ineighp4 = ineigh + 4;\n isonA = ineigh;\n isonB = (ineighp4 + 1) % 4;\n \n if(EmFather->neigh_proc(ineigh) == -1)\n {\n //handle map boundary special\n EmSon[isonA]->set_neighbor(ineigh, sfc_key_zero);\n EmSon[isonA]->set_neighbor(ineighp4, sfc_key_zero);\n EmSon[isonB]->set_neighbor(ineigh, sfc_key_zero);\n EmSon[isonB]->set_neighbor(ineighp4, sfc_key_zero);\n\n EmSon[isonA]->neighbor_ndx(ineigh, ti_ndx_doesnt_exist);\n EmSon[isonA]->neighbor_ndx(ineighp4, ti_ndx_doesnt_exist);\n EmSon[isonB]->neighbor_ndx(ineigh, ti_ndx_doesnt_exist);\n EmSon[isonB]->neighbor_ndx(ineighp4, ti_ndx_doesnt_exist);\n\n EmSon[isonA]->get_neigh_gen(ineigh, 0);\n EmSon[isonA]->get_neigh_gen(ineighp4, 0);\n EmSon[isonB]->get_neigh_gen(ineigh, 0);\n EmSon[isonB]->get_neigh_gen(ineighp4, 0);\n \n EmSon[isonA]->set_neigh_proc(ineigh, -1);\n EmSon[isonB]->set_neigh_proc(ineigh, -1);\n \n EmSon[isonA]->set_neigh_proc(ineighp4, -2);\n EmSon[isonB]->set_neigh_proc(ineighp4, -2);\n //if(Curr_El) if(IfNeighProcChange(El_Table,NodeTable,myid,Curr_El,EmFather)) assert(0);\n }\n else if((EmFather->neigh_proc(ineigh) == myid) && ((EmFather->neigh_proc(ineighp4) == myid)\n || (EmFather->neigh_proc(ineighp4) == -2)))\n {\n //case where one neighbor on this side is on my proc while the other\n //is on another proc has already been handled up above, when packing\n //the information to send to the other proc.\n \n //knock knock, Hello Neighbors\n 
ASSERT2(ElemTable->neighbor_ndx_[ineigh][ifather]==ElemTable->lookup_ndx(EmFather->neighbor(ineigh)));\n EmNeighOldNdx[0] = ElemTable->neighbor_ndx_[ineigh][ifather];\n EmNeighOld[0] = &(ElemTable->elenode_[EmNeighOldNdx[0]]);\n \n ASSERT2(ElemTable->neighbor_ndx_[ineighp4][ifather]==ElemTable->lookup_ndx(EmFather->neighbor(ineighp4)));\n EmNeighOldNdx[1] = ElemTable->neighbor_ndx_[ineighp4][ifather];\n EmNeighOld[1] = &(ElemTable->elenode_[EmNeighOldNdx[1]]);\n\n EmNeighNew[0] = EmNeighNew[1] = EmNeighNew[2] = EmNeighNew[3] = NULL;\n EmNeighNewNdx[0] = EmNeighNewNdx[1] = EmNeighNewNdx[2] = EmNeighNewNdx[3] = ti_ndx_unknown;\n \n for(ineighme = 0; ineighme < 8; ineighme++)\n {\n if(EmFather->key()==EmNeighOld[1]->neighbor(ineighme))\n break;\n \n }\n if(!(ineighme < 8))\n {\n cout<<\"FUBAR 0 detected in refine_neigh_update\\nEmFather={\"<key()<<\"}\\n\";\n cout<<\"EmNeighOld[0]={\"<key()<<\"} ineigh=\"<generation() - EmFather->generation())\n {\n case -1:\n //this is a case A\n inewcase = 0;\n assert(EmNeighOld[0]->adapted_flag()==OLDFATHER);\n ineighme = ineighmep4 = -1; //for sanity check\n break;\n case 0:\n assert(ineighme < 4);\n ineighmep4 = ineighme + 4;\n \n if(EmNeighOld[0]->adapted_flag() == OLDFATHER)\n {\n //this is a case C\n inewcase = 2;\n \n EmNeighNewNdx[0] = EmNeighOld[0]->son_ndx((ineighme + 1) % 4);\n EmNeighNew[0]=&(ElemTable->elenode_[EmNeighNewNdx[0]]);\n ASSERT2(EmNeighNewNdx[0] == ElemTable->lookup_ndx(EmNeighOld[0]->son((ineighme + 1) % 4)));\n\n EmNeighNewNdx[1] = EmNeighOld[0]->son_ndx(ineighme);\n EmNeighNew[1] = &(ElemTable->elenode_[EmNeighNewNdx[1]]);\n\n\n EmNeighNew[2] = EmNeighNew[3] = NULL;\n EmNeighNewNdx[2] = EmNeighNewNdx[3] = ti_ndx_unknown;\n }\n else\n {\n //this is a case B\n if(EmNeighOld[0]->adapted_flag() > TOBEDELETED)\n {\n inewcase = 1;\n EmNeighNew[0] = EmNeighOld[0];\n EmNeighNewNdx[0] = EmNeighOldNdx[0];\n EmNeighNew[1] = EmNeighNew[2] = EmNeighNew[3] = NULL;\n EmNeighNewNdx[1] = EmNeighNewNdx[2] = EmNeighNewNdx[3] = ti_ndx_unknown;\n\n }\n else\n inewcase = 0;\n \n }\n break;\n case 1:\n assert(ineighme < 4);\n ineighmep4 = ineighme + 4;\n \n if((EmNeighOld[0]->adapted_flag() == OLDFATHER) || (EmNeighOld[1]->adapted_flag() == OLDFATHER))\n {\n //this is a case E\n inewcase = 3;\n \n if(EmNeighOld[0]->adapted_flag() == OLDFATHER)\n {\n EmNeighNewNdx[0] = EmNeighOld[0]->son_ndx((ineighme + 1) % 4);\n EmNeighNew[0]=&(ElemTable->elenode_[EmNeighNewNdx[0]]);\n ASSERT2(EmNeighNewNdx[0] == ElemTable->lookup_ndx(EmNeighOld[0]->son((ineighme + 1) % 4)));\n \n EmNeighNewNdx[1] = EmNeighOld[0]->son_ndx(ineighme);\n EmNeighNew[1] = &(ElemTable->elenode_[EmNeighNewNdx[1]]);\n ASSERT2(EmNeighNewNdx[1] == ElemTable->lookup_ndx(EmNeighOld[0]->son(ineighme)));\n }\n else\n {\n EmNeighNew[1] = EmNeighNew[0] = EmNeighOld[0];\n EmNeighNewNdx[1] = EmNeighNewNdx[0] = EmNeighOldNdx[0];\n }\n \n if(EmNeighOld[1]->adapted_flag() == OLDFATHER)\n {\n EmNeighNewNdx[2] = EmNeighOld[1]->son_ndx((ineighme + 1) % 4);\n EmNeighNew[2] = &(ElemTable->elenode_[EmNeighNewNdx[2]]);\n ASSERT2(EmNeighNewNdx[2] == ElemTable->lookup_ndx(EmNeighOld[1]->son((ineighme + 1) % 4)));\n \n EmNeighNewNdx[3] = EmNeighOld[1]->son_ndx(ineighme);\n EmNeighNew[3] = &(ElemTable->elenode_[EmNeighNewNdx[3]]);\n ASSERT2(EmNeighNewNdx[3] == ElemTable->lookup_ndx(EmNeighOld[1]->son(ineighme)));\n }\n else\n {\n EmNeighNew[3] = EmNeighNew[2] = EmNeighOld[1];\n EmNeighNewNdx[3] = EmNeighNewNdx[2] = EmNeighOldNdx[1];\n }\n }\n else\n {\n //this is a case D\n inewcase = 2;\n \n EmNeighNew[0] = 
EmNeighOld[0];\n EmNeighNew[1] = EmNeighOld[1];\n EmNeighNew[2] = EmNeighNew[3] = NULL;\n\n EmNeighNewNdx[0] = EmNeighOldNdx[0];\n EmNeighNewNdx[1] = EmNeighOldNdx[1];\n EmNeighNewNdx[2] = EmNeighNewNdx[3] = ti_ndx_unknown;\n\n }\n break;\n default:\n inewcase = -1;\n \n printf(\"FUBAR 1 detected in refine_neigh_update! aborting.\\n\");\n assert(0);\n break;\n } //switch based on difference in generation between me and my old neighbor, this is used to reduce the number of cases from 5 to 3 (based on new neighbor generation)\n \n //sanity check\n assert((ineigh >= 0) && (ineigh < 4));\n assert(ineighp4 == ineigh + 4);\n if(inewcase)\n {\n assert((ineighme >= 0) && (ineighme < 4));\n assert(ineighmep4 == ineighme + 4);\n }\n \n //now only deal with the new cases, and yes I know that I\n //am resetting neighbor information in ghost cells but\n //not neighbor information of the original cells on other\n //processors, I'm going to fix that in a minute\n switch (inewcase)\n {\n case 0:\n //case A\n break;\n case 1:\n //case B\n //new neighbor generation is my (the OLDFATHER) generation\n EmNeighNew[0]->set_neighbor(ineighme, EmSon[isonB]->key());\n EmNeighNew[0]->set_neighbor(ineighmep4, EmSon[isonA]->key());\n\n EmSon[isonA]->set_neighbor(ineigh, EmNeighNew[0]->key());\n EmSon[isonA]->set_neighbor(ineighp4, EmNeighNew[0]->key());\n EmSon[isonB]->set_neighbor(ineigh, EmNeighNew[0]->key());\n EmSon[isonB]->set_neighbor(ineighp4, EmNeighNew[0]->key());\n \n EmNeighNew[0]->neighbor_ndx(ineighme, EmSonNdx[isonB]);\n EmNeighNew[0]->neighbor_ndx(ineighmep4, EmSonNdx[isonA]);\n\n EmSon[isonA]->neighbor_ndx(ineigh, EmNeighNewNdx[0]);\n EmSon[isonA]->neighbor_ndx(ineighp4, EmNeighNewNdx[0]);\n EmSon[isonB]->neighbor_ndx(ineigh, EmNeighNewNdx[0]);\n EmSon[isonB]->neighbor_ndx(ineighp4, EmNeighNewNdx[0]);\n\n EmNeighNew[0]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[0]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n \n EmSon[isonA]->get_neigh_gen(ineigh, EmNeighNew[0]->generation());\n EmSon[isonA]->get_neigh_gen(ineighp4, EmNeighNew[0]->generation());\n EmSon[isonB]->get_neigh_gen(ineigh, EmNeighNew[0]->generation());\n EmSon[isonB]->get_neigh_gen(ineighp4, EmNeighNew[0]->generation());\n \n EmSon[isonA]->set_neigh_proc(ineighp4, -2);\n EmSon[isonB]->set_neigh_proc(ineighp4, -2);\n \n EmNeighNew[0]->set_neigh_proc(ineighme, EmFather->myprocess());\n EmNeighNew[0]->set_neigh_proc(ineighmep4, EmFather->myprocess());\n \n //update the nodes on this side\n //The new difference in generation tells me the OLDFATHER's\n //2 corner nodes on this side are actually CORNER's and not\n //S_C_CON's\n \n inode = ineigh;\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n inode = ineighp4;\n ASSERT2(node_key_ndx[inode][EmSonNdx[isonA]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonA]]));\n node_info[node_key_ndx[inode][EmSonNdx[isonA]]]=S_S_CON;\n \n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=S_C_CON;\n \n ASSERT2(node_key_ndx[inode][EmSonNdx[isonB]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonB]]));\n node_info[node_key_ndx[inode][EmSonNdx[isonB]]]=S_S_CON;\n \n inode = (ineigh + 1) % 4;\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n break;\n case 2:\n //cases C & D\n //new neighbor generation 
is my son's generation\n EmNeighNew[0]->set_neighbor(ineighme, EmSon[isonA]->key());\n EmNeighNew[0]->set_neighbor(ineighmep4, EmSon[isonA]->key());\n EmNeighNew[1]->set_neighbor(ineighme, EmSon[isonB]->key());\n EmNeighNew[1]->set_neighbor(ineighmep4, EmSon[isonB]->key());\n \n EmSon[isonA]->set_neighbor(ineigh, EmNeighNew[0]->key());\n EmSon[isonA]->set_neighbor(ineighp4, EmNeighNew[0]->key());\n EmSon[isonB]->set_neighbor(ineigh, EmNeighNew[1]->key());\n EmSon[isonB]->set_neighbor(ineighp4, EmNeighNew[1]->key());\n \n EmNeighNew[0]->neighbor_ndx(ineighme, EmSonNdx[isonA]);\n EmNeighNew[0]->neighbor_ndx(ineighmep4, EmSonNdx[isonA]);\n EmNeighNew[1]->neighbor_ndx(ineighme, EmSonNdx[isonB]);\n EmNeighNew[1]->neighbor_ndx(ineighmep4, EmSonNdx[isonB]);\n\n EmSon[isonA]->neighbor_ndx(ineigh, EmNeighNewNdx[0]);\n EmSon[isonA]->neighbor_ndx(ineighp4, EmNeighNewNdx[0]);\n EmSon[isonB]->neighbor_ndx(ineigh, EmNeighNewNdx[1]);\n EmSon[isonB]->neighbor_ndx(ineighp4, EmNeighNewNdx[1]);\n\n EmNeighNew[0]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[0]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n EmNeighNew[1]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[1]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n \n EmNeighNew[0]->set_neigh_proc(ineighmep4, -2);\n EmNeighNew[1]->set_neigh_proc(ineighmep4, -2);\n EmSon[isonA]->set_neigh_proc(ineighp4, -2);\n EmSon[isonB]->set_neigh_proc(ineighp4, -2);\n \n EmSon[isonA]->get_neigh_gen(ineigh, EmNeighNew[0]->generation());\n EmSon[isonA]->get_neigh_gen(ineighp4, EmNeighNew[0]->generation());\n EmSon[isonB]->get_neigh_gen(ineigh, EmNeighNew[0]->generation());\n EmSon[isonB]->get_neigh_gen(ineighp4, EmNeighNew[0]->generation());\n \n EmSon[isonA]->set_neigh_proc(ineigh, EmNeighNew[0]->myprocess());\n \n EmSon[isonB]->set_neigh_proc(ineigh, EmNeighNew[1]->myprocess());\n \n EmNeighNew[0]->set_neigh_proc(ineighme, EmFather->myprocess());\n EmNeighNew[1]->set_neigh_proc(ineighme, EmFather->myprocess());\n \n //update the nodes on this side\n //don't update my corner nodes because they could be S_C_CON's\n //if they should be S_C_CON's and I reset them to CORNERs I\n //will no longer conserve mass/volume in a dramatically\n //observable fashion\n \n if(EmSon[isonA]->neigh_gen((ineigh + 3) % 4) == EmSon[isonA]->generation())\n {\n //neighbor before (tested here) and after this (the ineigh)\n //corner (i.e. the ineigh neighbor) are the same generation\n //as me, therefor this (the ineigh) node is a CORNER and not\n //an S_C_CON node\n inode = ineigh;\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n }\n \n inode = ineigh + 4;\n ASSERT2(node_key_ndx[inode][EmSonNdx[isonA]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonA]]));\n node_info[node_key_ndx[inode][EmSonNdx[isonA]]]=SIDE;\n \n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n ASSERT2(node_key_ndx[inode][EmSonNdx[isonB]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonB]]));\n node_info[node_key_ndx[inode][EmSonNdx[isonB]]]=SIDE;\n \n if(EmSon[isonB]->neigh_gen((ineigh + 1) % 4) == EmSon[isonB]->generation())\n {\n //neighbor before (i.e. 
the ineigh neighbor) and after\n //(tested here) this (the (ineigh+1)%4) corner are the\n //the same generation as me, therefore this (the\n //(ineigh+1)%4) node is a CORNER and not an S_C_CON node\n inode = (ineigh + 1) % 4;\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n }\n \n break;\n case 3:\n //case E\n \n //update the nodes on this side\n \n inode = ineigh; //father corner node\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n inode = ineighp4; //father edge node\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n inode = (ineigh + 1) % 4; //father corner node\n ASSERT2(node_key_ndx[inode][ifather] == NodeTable->lookup_ndx(node_key[inode][ifather]));\n node_info[node_key_ndx[inode][ifather]]=CORNER;\n \n \n EmNeighNew[0]->set_neighbor(ineighme, EmSon[isonA]->key());\n EmNeighNew[0]->set_neighbor(ineighmep4, EmSon[isonA]->key());\n EmNeighNew[1]->set_neighbor(ineighme, EmSon[isonA]->key());\n EmNeighNew[1]->set_neighbor(ineighmep4, EmSon[isonA]->key());\n\n EmNeighNew[2]->set_neighbor(ineighme, EmSon[isonB]->key());\n EmNeighNew[2]->set_neighbor(ineighmep4, EmSon[isonB]->key());\n EmNeighNew[3]->set_neighbor(ineighme, EmSon[isonB]->key());\n EmNeighNew[3]->set_neighbor(ineighmep4, EmSon[isonB]->key());\n\n EmSon[isonA]->set_neighbor(ineigh, EmNeighNew[0]->key());\n EmSon[isonA]->set_neighbor(ineighp4, EmNeighNew[1]->key());\n\n EmSon[isonB]->set_neighbor(ineigh, EmNeighNew[2]->key());\n EmSon[isonB]->set_neighbor(ineighp4, EmNeighNew[3]->key());\n \n EmNeighNew[0]->neighbor_ndx(ineighme, EmSonNdx[isonA]);\n EmNeighNew[0]->neighbor_ndx(ineighmep4, EmSonNdx[isonA]);\n EmNeighNew[1]->neighbor_ndx(ineighme, EmSonNdx[isonA]);\n EmNeighNew[1]->neighbor_ndx(ineighmep4, EmSonNdx[isonA]);\n\n EmNeighNew[2]->neighbor_ndx(ineighme, EmSonNdx[isonB]);\n EmNeighNew[2]->neighbor_ndx(ineighmep4, EmSonNdx[isonB]);\n EmNeighNew[3]->neighbor_ndx(ineighme, EmSonNdx[isonB]);\n EmNeighNew[3]->neighbor_ndx(ineighmep4, EmSonNdx[isonB]);\n\n EmSon[isonA]->neighbor_ndx(ineigh, EmNeighNewNdx[0]);\n EmSon[isonA]->neighbor_ndx(ineighp4, EmNeighNewNdx[1]);\n\n EmSon[isonB]->neighbor_ndx(ineigh, EmNeighNewNdx[2]);\n EmSon[isonB]->neighbor_ndx(ineighp4, EmNeighNewNdx[3]);\n\n //if(Curr_El) if(IfNeighProcChange(El_Table,NodeTable,myid,Curr_El,EmFather)) assert(0);\n \n EmNeighNew[0]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[0]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n EmNeighNew[1]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[1]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n EmNeighNew[2]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[2]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n EmNeighNew[3]->get_neigh_gen(ineighme, EmSon[isonA]->generation());\n EmNeighNew[3]->get_neigh_gen(ineighmep4, EmSon[isonA]->generation());\n \n EmSon[isonA]->get_neigh_gen(ineigh, EmNeighNew[0]->generation());\n EmSon[isonA]->get_neigh_gen(ineighp4, EmNeighNew[0]->generation());\n \n EmSon[isonB]->get_neigh_gen(ineigh, EmNeighNew[2]->generation());\n EmSon[isonB]->get_neigh_gen(ineighp4, EmNeighNew[2]->generation());\n \n EmNeighNew[0]->set_neigh_proc(ineighmep4, -2);\n EmNeighNew[1]->set_neigh_proc(ineighmep4, -2);\n 
EmNeighNew[2]->set_neigh_proc(ineighmep4, -2);\n EmNeighNew[3]->set_neigh_proc(ineighmep4, -2);\n \n //if(Curr_El) if(IfNeighProcChange(El_Table,NodeTable,myid,Curr_El,EmFather)) assert(0);\n \n EmSon[isonA]->set_neigh_proc(ineigh, EmFather->neigh_proc(ineigh));\n \n //if(Curr_El) if(IfNeighProcChange(El_Table,NodeTable,myid,Curr_El,EmFather)) assert(0);\n \n EmSon[isonB]->set_neigh_proc(ineigh, EmFather->neigh_proc(ineighp4));\n \n //if(Curr_El) if(IfNeighProcChange(El_Table,NodeTable,myid,Curr_El,EmFather)) assert(0);\n \n inode = ineighp4; //sonA edge node\n ASSERT2(node_key_ndx[inode][EmSonNdx[isonA]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonA]]));\n\n if(EmSon[isonA]->neighbor(ineigh)==EmSon[isonA]->neighbor(ineighp4))\n {\n EmSon[isonA]->set_neigh_proc(ineighp4, -2);\n node_info[node_key_ndx[inode][EmSonNdx[isonA]]]=SIDE;\n }\n else\n {\n EmSon[isonA]->set_neigh_proc(ineighp4, EmSon[isonA]->neigh_proc(ineigh));\n \n node_info[node_key_ndx[inode][EmSonNdx[isonA]]]=S_C_CON;\n \n inode = ineighmep4;\n \n ASSERT2(node_key_ndx[inode][EmNeighNewNdx[0]] == NodeTable->lookup_ndx(node_key[inode][EmNeighNewNdx[0]]));\n node_info[node_key_ndx[inode][EmNeighNewNdx[0]]]=S_S_CON;\n \n ASSERT2(node_key_ndx[inode][EmNeighNewNdx[1]] == NodeTable->lookup_ndx(node_key[inode][EmNeighNewNdx[1]]));\n node_info[node_key_ndx[inode][EmNeighNewNdx[1]]]=S_S_CON;\n }\n \n inode = ineighp4; //sonB edge node\n ASSERT2(node_key_ndx[inode][EmSonNdx[isonB]]==NodeTable->lookup_ndx(node_key[inode][EmSonNdx[isonB]]));\n if(EmSon[isonB]->neighbor(ineigh)==EmSon[isonB]->neighbor(ineighp4))\n {\n EmSon[isonB]->set_neigh_proc(ineighp4, -2);\n node_info[node_key_ndx[inode][EmSonNdx[isonB]]]=SIDE;\n }\n else\n {\n EmSon[isonB]->set_neigh_proc(ineighp4, EmSon[isonB]->neigh_proc(ineigh));\n \n node_info[node_key_ndx[inode][EmSonNdx[isonB]]]=S_C_CON;\n \n inode = ineighmep4;\n \n ASSERT2(node_key_ndx[inode][EmNeighNewNdx[2]] == NodeTable->lookup_ndx(node_key[inode][EmNeighNewNdx[2]]));\n node_info[node_key_ndx[inode][EmNeighNewNdx[2]]]=S_S_CON;\n \n ASSERT2(node_key_ndx[inode][EmNeighNewNdx[3]] == NodeTable->lookup_ndx(node_key[inode][EmNeighNewNdx[3]]));\n node_info[node_key_ndx[inode][EmNeighNewNdx[3]]]=S_S_CON;\n }\n \n break;\n default:\n printf(\"FUBAR 2 detected in refine_neigh_update! aborting.\\n\");\n assert(0);\n \n break;\n } //switch(inewcase), case based on generation of my new neighbor\n \n } //else: not a map boundary\n \n } //iside loop\n \n } #pragma omp parallel for schedule(dynamic,TITAN2D_DINAMIC_CHUNK)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try20/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "n euclidean_distance[id1 * n + id2];\n}\n\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\nfor (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = euclidean_distance[j * n + i] =\n (i == j) ? 
0 : calc_distance(i, j);\n }\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try20/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 200) num_threads(thread_count)", "context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[tid][__m__ * n + __n__] =\n euclidean_distance[_d2 + __n__];\n }\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n int bound = __i__ + 1;\n for (int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost +=\n calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(static, 200) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try9/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try9/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "tf(\"c_n_m = %d, each_thread_works = %d\\n\", c_n_m(n, m),\n each_thread_works);\n\n for (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n struct timeval start1, end1;\n gettimeofday(&start1, NULL);\n // *********************************************************\n int base_index = __thread__ * each_thread_works;\n int end_index = base_index + each_thread_works;\n if (end_index > c_n_m(n, m)) {\n end_index = c_n_m(n, m);\n }\n\n for (int i = base_index; i < end_index; i++) {\n united_calc_and_add(object[i].values, &object[i].cost);\n }\n\n gettimeofday(&end1, NULL);\n printf(\"thread %d finished, time = %lf ms\\n\", __thread__,\n (end1.tv_sec - start1.tv_sec) * 1000 +\n (end1.tv_usec - start1.tv_usec) / 1000.0);\n // *********************************************************\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try6/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try6/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "bination));\n com->values = (int *)malloc(sizeof(int) * m);\n com->cost = 0;\n // for (int i = 0; i 
< c_n_m(n, m); i++) {\n com = next_combination(com); // CRITICAL SECTION \n if (com == NULL) {\n break;\n }\n int *values = com->values;\n // struct timeval start, end;\n // gettimeofday(&start, NULL);\n calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix,\n values); // cost about 1 ms\n // gettimeofday(&end, NULL);\n // printf(\"calcAllChebyshevDistanceAndStoreInArray() time: %ld\n // ms\\n\",\n // (end.tv_sec - start.tv_sec) * 1000 +\n // (end.tv_usec - start.tv_usec) / 1000);\n float res = add_all_entries_of_matrix(chebyshev_matrix);\n // float res = 0;\n com->cost = res;\n // store the combination in object array\n // object had been fully allocated in the beginning.\n store_in_object(com); // CRITICAL SECTION \n if (res_index % 1000 == 0) {\n printf(\"combination %d finished, i = %d \\n\\n\", res_index, i);\n }\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try7/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try7/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "\n omp_lock_t LOCK_1, LOCK_2;\n omp_init_lock(&LOCK_1);\n omp_init_lock(&LOCK_2);\nfor (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n // *********************************************************\n // int chebyshev_matrix_set = 0;\n double **chebyshev_matrix = (double **)malloc(sizeof(double *) * n);\n for (int j = 0; j < n; j++) {\n chebyshev_matrix[j] = (double *)malloc(sizeof(double) * n);\n };\n combination *com = (combination *)malloc(sizeof(combination));\n com->values = (int *)malloc(sizeof(int) * m);\n com->cost = 0;\n for (int i = 0; i < each_thread_works; i++) {\n // lock1\n omp_set_lock(&LOCK_1);\n com = next_combination(com); // CRITICAL SECTION\n omp_unset_lock(&LOCK_1);\n if (com == NULL) {\n printf(\"thread %d finished, breaking...\\n\", __thread__);\n break;\n }\n int *values = com->values;\n // struct timeval start, end;\n // gettimeofday(&start, NULL);\n calcAllChebyshevDistanceAndStoreInArray(\n chebyshev_matrix,\n values); // cost about 1 ms\n // gettimeofday(&end, NULL);\n // printf(\"calcAllChebyshevDistanceAndStoreInArray() time: %ld\n // ms\\n\",\n // (end.tv_sec - start.tv_sec) * 1000 +\n // (end.tv_usec - start.tv_usec) / 1000);\n double res = add_all_entries_of_matrix(chebyshev_matrix);\n // double res = 0;\n com->cost = res;\n // store the combination in object array\n // object had been fully allocated in the beginning.\n // lock2\n omp_set_lock(&LOCK_2);\n store_in_object(com); // CRITICAL SECTION\n omp_unset_lock(&LOCK_2);\n if (res_index % 1000 == 0) {\n printf(\"combination %d finished, i = %d, thread = %d \\n\\n\",\n res_index, i, __thread__);\n }\n }\n if (!com == NULL) {\n free(com->values);\n free(com);\n }\n for (int j = 0; j < n; j++) {\n free(chebyshev_matrix[j]);\n }\n free(chebyshev_matrix);\n\n // *********************************************************\n } #pragma omp parallel for 
num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "2)\nstatic INLINE double calc_distance(int id1, int id2) {\n double sum = 0;\n // 更改5,没作用\n // for (int i = 0; i < dim; i++) {\n double diff = get_point_coordinate_of_id_and_dimension(id1, i) -\n get_point_coordinate_of_id_and_dimension(id2, i);\n sum += diff * diff;\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "id2];\n}\n\n// 计算每对顶点的欧几里得距离并存储在数组中\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\n // // 更改3: 加入collapse(2)\n #pragma omp parallel for num_threads(thread_count)\n for (int i = 0; i < n; i++) {\n // 更改4,大概优化100ms\n for (int j = i; j < n; j++) {\n // if (i == j) {\n // euclidean_distance[i * n + j] = 0;\n // } else {\n // euclidean_distance[i * n + j] = calc_distance(i, j);\n // }\n // *更改1:更换为三目运算符\n // euclidean_distance[i * n + j] = (i == j) ? 0 : calc_distance(i, j);\n // 更改4: d(i,j) = d(j,i)\n euclidean_distance[i * n + j] = euclidean_distance[j * n + i] = (i == j) ? 0 : calc_distance(i, j);\n\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "InArray() {\n // #pragma omp parallel for num_threads(thread_count)\n // 更改3: 加入collapse(2)\n for (int i = 0; i < n; i++) {\n // 更改4,大概优化100ms\n for (int j = i; j < n; j++) {\n // if (i == j) {\n // euclidean_distance[i * n + j] = 0;\n // } else {\n // euclidean_distance[i * n + j] = calc_distance(i, j);\n // }\n // *更改1:更换为三目运算符\n // euclidean_distance[i * n + j] = (i == j) ? 0 : calc_distance(i, j);\n // 更改4: d(i,j) = d(j,i)\n euclidean_distance[i * n + j] = euclidean_distance[j * n + i] = (i == j) ? 
0 : calc_distance(i, j);\n\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for ?", "context_chars": 100, "text": "bject\n object = (combination *)malloc(sizeof(combination) * c_n_m(n, m));\n // *change, could add // #pragma omp parallel for (got slower)\n for (int i = 0; i < c_n_m(n, m); i++) {\n object[i].values = (int *)malloc(sizeof(int) * m);\n object[i].cost = 0;\n } #pragma omp parallel for ?"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for (got slower)", "context_chars": 100, "text": "malloc(sizeof(combination) * c_n_m(n, m));\n // *change, could add #pragma omp parallel for ?\n // for (int i = 0; i < c_n_m(n, m); i++) {\n object[i].values = (int *)malloc(sizeof(int) * m);\n object[i].cost = 0;\n } #pragma omp parallel for (got slower)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static,200) num_threads(thread_count) ", "context_chars": 100, "text": "en = c_n_m(n, m);\n // set omp thread number\n // Q: how was schedule(static,200) chosen?\n // #pragma omp parallel for schedule(guided) num_threads(thread_count)\n for (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int d1 = tid * m * n;\n int *values = object[i].values;\n // compute the matrix at layer d1 of the cube\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d2 = d1 + __m__ * n;\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[d2 + __n__] = euclidean_distance[_d2 + __n__];\n }\n }\n // compute the Chebyshev distance between every pair of points\n for (int __i__ = 0; __i__ < n; __i__++) {\n for (int __j__ = __i__ + 1; __j__ < n; __j__++) {\n object[i].cost += calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(static,200) num_threads(thread_count) "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try16/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) num_threads(thread_count)", "context_chars": 100, "text": "c,200)?\n #pragma omp parallel for schedule(static,200) num_threads(thread_count) \n // for (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int d1 = tid * m * n;\n int *values = object[i].values;\n // compute the matrix at layer d1 of the cube\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d2 = d1 + __m__ * n;\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[d2 + __n__] = euclidean_distance[_d2 + __n__];\n }\n }\n // compute the Chebyshev distance between every pair of points\n for (int __i__ = 0; __i__ < n; __i__++) {\n for (int __j__ = __i__ + 1; __j__ < n; __j__++) {\n object[i].cost += calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(guided) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try18/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "n euclidean_distance[id1 * n + id2];\n}\n\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\nfor (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n if (i == j) {\n euclidean_distance[i * n + j] = 0;\n } 
else {\n euclidean_distance[i * n + j] = calc_distance(i, j);\n }\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try18/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 200) num_threads(thread_count)", "context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[tid][__m__ * n + __n__] =\n euclidean_distance[_d2 + __n__];\n }\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n int bound = __i__ + 1;\n for (int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost +=\n calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(static, 200) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try5/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try5/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": " j++) {\n chebyshev_matrix[j] = (float *)malloc(sizeof(float) * n);\n };\n // for (int i = 0; i < c_n_m(n, m); i++) {\n combination *com = next_combination(); // very quick\n if (com == NULL) {\n break;\n }\n int *values = com->values;\n // struct timeval start, end;\n // gettimeofday(&start, NULL);\n calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix,\n values); // very slow!!\n // gettimeofday(&end, NULL);\n // printf(\"calcAllChebyshevDistanceAndStoreInArray() time: %ld ms\\n\",\n // (end.tv_sec - start.tv_sec) * 1000 +\n // (end.tv_usec - start.tv_usec) / 1000);\n float res = add_all_entries_of_matrix(chebyshev_matrix);\n // float res = 0;\n com->cost = res;\n // store the combination in object array\n // object had been fully allocated in the beginning.\n store_in_object(com);\n free(com->values);\n free(com);\n if (res_index % 1000 == 0) {\n printf(\"combination %d finished, i = %d \\n\\n\", res_index, i);\n }\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try10/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": " printf(\"c_n_m = %d, each_thread_works = %d\\n\", c_n_m(n, m),\n each_thread_works);\n\nfor (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n struct timeval start1, end1;\n gettimeofday(&start1, NULL);\n // *********************************************************\n int base_index = __thread__ * each_thread_works;\n int end_index = base_index + each_thread_works;\n if (end_index > c_n_m(n, m)) {\n 
end_index = c_n_m(n, m);\n }\n\n for (int i = base_index; i < end_index; i++) {\n for (int j = 0; j < m; j++) {\n small_cache[__thread__][j] = object[i].values[j] * n;\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n for (int __j__ = 0; __j__ < n; __j__++) {\n if (__i__ > __j__) {\n object[i].cost += calcOneChebyshevDistance(\n __i__, __j__, object[i].values, __thread__);\n }\n }\n }\n }\n\n gettimeofday(&end1, NULL);\n printf(\"thread %d finished, time = %lf ms\\n\", __thread__,\n (end1.tv_sec - start1.tv_sec) * 1000 +\n (end1.tv_usec - start1.tv_usec) / 1000.0);\n // *********************************************************\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try3/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "alcEuclideanDistanceAndStoreInArray() {\n// when adding this pragma, the program can be really fast!\nfor (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try19/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "return euclidean_distance[id1][id2];\n}\n\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\nfor (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n if (i == j) {\n euclidean_distance[i][j] = 0;\n } else {\n euclidean_distance[i][j] = calc_distance(i, j);\n }\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try19/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int pivot_id = values[__m__];\n cache_eu_dist[tid][__m__] = euclidean_distance[pivot_id];\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n int bound = __i__ + 1;\n for (int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost +=\n calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try14/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "clidean_distance[id1 * n + id2];\n}\n\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n if (i == j) {\n euclidean_distance[i * n + j] = 0;\n } else {\n euclidean_distance[i * n + j] = calc_distance(i, j);\n }\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try14/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 200) num_threads(thread_count)", 
"context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n int tid = omp_get_thread_num();\n int d1 = tid * m * n;\n int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d2 = d1 + __m__ * n;\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[d2 + __n__] = euclidean_distance[_d2 + __n__];\n }\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n int bound = __i__ + 1;\n for (int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost +=\n calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(static, 200) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try11/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": " printf(\"c_n_m = %d, each_thread_works = %d\\n\", c_n_m(n, m),\n each_thread_works);\n\nfor (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n struct timeval start1, end1;\n gettimeofday(&start1, NULL);\n // *********************************************************\n int base_index = __thread__ * each_thread_works;\n int end_index = base_index + each_thread_works;\n if (end_index > c_n_m(n, m)) {\n end_index = c_n_m(n, m);\n }\n\n for (int i = base_index; i < end_index; i++) {\n for (int __i__ = 0; __i__ < n; __i__++) { // 500 times\n for (int __j__ = 0; __j__ < n; __j__++) { // 500 * 500 times\n if (__i__ > __j__) {\n object[i].cost += calcOneChebyshevDistance(\n __i__, __j__, object[i].values);\n }\n }\n }\n }\n\n gettimeofday(&end1, NULL);\n printf(\"thread %d finished, time = %lf ms\\n\", __thread__,\n (end1.tv_sec - start1.tv_sec) * 1000 +\n (end1.tv_usec - start1.tv_usec) / 1000.0);\n // *********************************************************\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try15-big-change-using-avx2/pivot.c", "omp_pragma_line": "#pragma omp parallel for ", "context_chars": 100, "text": "***********************************\n int len = c_n_m(n, m);\n // set omp thread number\nfor (int i = 0; i < len; i++) {\n // printf(\"HERE\\n\");\n int tid = omp_get_thread_num();\n int d1 = tid * m * n;\n int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d2 = d1 + __m__ * n;\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[d2 + __n__] = euclidean_distance[_d2 + __n__];\n }\n }\n\n // init the small cache\n int d2 = tid * m;\n for (int __n__ = 0; __n__ < n; __n__++) {\n for (int col = 0; col < block_num; col++) {\n __m256d _max = _mm256_set1_pd(0.0);\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d3 = d1 + __m__ * n;\n small_cache[d2 + __m__] = cache_eu_dist[d3 + __n__];\n\n // construct __m256d every 4 elements of cache_eu_dist\n // __m256d _cache_eu_dist =\n // _mm256_loadu_pd(&cache_eu_dist[d3 + col * 4]);\n block_cache[d2 + __m__] = _mm256_andnot_pd(\n _mm256_set1_pd(-0.0),\n _mm256_sub_pd(_mm256_loadu_pd(&cache_eu_dist[d3 + col * 4]), \n _mm256_set1_pd(small_cache[d2 + __m__])));\n // for each index from 0 to 3, find the max value of\n // each column\n if (__m__ == 0) {\n _max = block_cache[d2 + __m__];\n } else {\n _max = _mm256_max_pd(_max, block_cache[d2 + __m__]);\n }\n 
}\n object[i].cost += hsum_double_avx(_max);\n }\n }\n } #pragma omp parallel for "} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try17/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "clidean_distance[id1 * n + id2];\n}\n\nstatic INLINE void calcEuclideanDistanceAndStoreInArray() {\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n if (i == j) {\n euclidean_distance[i * n + j] = 0;\n } else {\n euclidean_distance[i * n + j] = calc_distance(i, j);\n }\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try17/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 200) num_threads(thread_count)", "context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n int tid_ = omp_get_thread_num();\n short unsigned int tid = (short unsigned int)tid_;\n int d1 = tid * m * n;\n short unsigned int *values = object[i].values;\n for (int __m__ = 0; __m__ < m; __m__++) {\n int d2 = d1 + __m__ * n;\n int pivot_id = values[__m__];\n int _d2 = pivot_id * n;\n for (int __n__ = 0; __n__ < n; __n__++) {\n cache_eu_dist[d2 + __n__] = euclidean_distance[_d2 + __n__];\n }\n }\n for (short unsigned int __i__ = 0; __i__ < n; __i__++) {\n short unsigned int bound = __i__ + 1;\n for (short unsigned int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost +=\n calcOneChebyshevDistance(__i__, __j__, tid);\n }\n }\n } #pragma omp parallel for schedule(static, 200) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try13/pivot.c", "omp_pragma_line": "#pragma omp parallel for schedule(static, 100) num_threads(thread_count)", "context_chars": 100, "text": "*******************************************\n int len = c_n_m(n, m);\n// set omp thread number\nfor (int i = 0; i < len; i++) {\n for (int __i__ = 0; __i__ < n; __i__++) {\n int bound = __i__ + 1;\n for (int __j__ = bound; __j__ < n; __j__++) {\n object[i].cost += calcOneChebyshevDistance(\n __i__, __j__, object[i].values);\n }\n }\n } #pragma omp parallel for schedule(static, 100) num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try12/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": " printf(\"c_n_m = %d, each_thread_works = %d\\n\", c_n_m(n, m),\n each_thread_works);\n\nfor (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n struct timeval start1, end1;\n gettimeofday(&start1, NULL);\n // *********************************************************\n int base_index = __thread__ * each_thread_works;\n int end_index = base_index + each_thread_works;\n if (end_index > c_n_m(n, m)) {\n end_index = c_n_m(n, m);\n }\n\n for (int i = base_index; i < end_index; i++) {\n for (int j = 0; j < m; j++) {\n // small_cache[__thread__][j] = object[i].values[j] * n;\n small_cache[__thread__ * m + j] = object[i].values[j];\n }\n for (int __i__ = 0; __i__ < n; __i__++) {\n for (int __j__ = 0; __j__ < n; __j__++) {\n if (__i__ > __j__) {\n object[i].cost += calcOneChebyshevDistance(\n __i__, __j__, __thread__);\n }\n }\n }\n }\n\n 
gettimeofday(&end1, NULL);\n printf(\"thread %d finished, time = %lf ms\\n\", __thread__,\n (end1.tv_sec - start1.tv_sec) * 1000 +\n (end1.tv_usec - start1.tv_usec) / 1000.0);\n // *********************************************************\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try8/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try8/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": " printf(\"c_n_m = %d, each_thread_works = %d\\n\", c_n_m(n, m),\n each_thread_works);\n\nfor (int __thread__ = 0; __thread__ < thread_count; __thread__++) {\n struct timeval start1, end1;\n gettimeofday(&start1, NULL);\n // *********************************************************\n // int chebyshev_matrix_set = 0;\n double **chebyshev_matrix = (double **)malloc(sizeof(double *) * n);\n for (int j = 0; j < n; j++) {\n chebyshev_matrix[j] = (double *)malloc(sizeof(double) * n);\n };\n int base_index = __thread__ * each_thread_works;\n int end_index = base_index + each_thread_works;\n if (end_index > c_n_m(n, m)) {\n end_index = c_n_m(n, m);\n }\n\n for (int i = base_index; i < end_index; i++) {\n calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix,\n object[i].values);\n object[i].cost = add_all_entries_of_matrix(chebyshev_matrix);\n }\n\n for (int j = 0; j < n; j++) {\n free(chebyshev_matrix[j]);\n }\n free(chebyshev_matrix);\n gettimeofday(&end1, NULL);\n printf(\"thread %d finished, time = %lf ms\\n\", __thread__,\n (end1.tv_sec - start1.tv_sec) * 1000 +\n (end1.tv_usec - start1.tv_usec) / 1000.0);\n // *********************************************************\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try4/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "nDistanceAndStoreInArray() {\n // when adding this pragma, the program can be really fast!\n // for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n euclidean_distance[i * n + j] = get_distance(i, j);\n }\n // printf(\"calcEuclideanDistanceAndStoreInArray: %d\\n\", i);\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try4/pivot.c", "omp_pragma_line": "#pragma omp parallel for num_threads(thread_count)", "context_chars": 100, "text": "chebyshev_matrix;\n // omp_lock_t writelock;\n // omp_init_lock(&writelock);\n // for (int i = 0; i < c_n_m(n, m); i++) {\n combination *com = next_combination(); // very quick\n if (com == NULL) {\n break;\n }\n chebyshev_matrix = (float **)malloc(sizeof(float *) * n);\n for (int j = 0; j < n; j++) {\n chebyshev_matrix[j] = (float *)malloc(sizeof(float) * n);\n }\n\n int *values = com->values;\n // struct timeval start, end;\n 
gettimeofday(&start, NULL);\n calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix,\n values); // very slow!!\n gettimeofday(&end, NULL);\n printf(\"calcAllChebyshevDistanceAndStoreInArray() time: %ld ms\\n\",\n ((end.tv_sec * 1000000 + end.tv_usec) -\n (start.tv_sec * 1000000 + start.tv_usec)) /\n 1000);\n float res = add_all_entries_of_matrix(chebyshev_matrix);\n // float res = 0;\n com->cost = res;\n // store the combination in object array\n // object had been fully allocated in the beginning.\n store_in_object(com);\n for (int j = 0; j < n; j++) {\n free(chebyshev_matrix[j]);\n }\n free(chebyshev_matrix);\n free(com->values);\n free(com);\n if (res_index % 1000 == 0) {\n printf(\"combination %d finished, i = %d \\n\\n\", res_index, i);\n }\n } #pragma omp parallel for num_threads(thread_count)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Abhiramborige/Parallel_programs_C/sum_for_reduction.c", "omp_pragma_line": "#pragma omp parallel for default(shared) private(i) reduction(+:sum)", "context_chars": 100, "text": ";\n double t1,t2;\n for(int i=0; ifor(i=0; i #pragma omp parallel for default(shared) private(i) reduction(+:sum)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Abhiramborige/Parallel_programs_C/private.c", "omp_pragma_line": "#pragma omp parallel for firstprivate(x)", "context_chars": 100, "text": "#include\n#include\n\nint main(){\n int x=44;int i;\n for(i=0; i<10; i++){\n x=i;\n printf(\"Thread no. %d and x = %d\\n\", omp_get_thread_num(), x);\n } #pragma omp parallel for firstprivate(x)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c", "omp_pragma_line": "#pragma omp parallel for schedule(static) reduction (+:s)", "context_chars": 100, "text": "\n task_list* l = task_list_init();\n\n int s = 0;\n // A nice for in parallel with openMP\n for (int j = 0; j < N; j++) {\n // We create the structure to hold the ints\n struct data d = {j, &s};\n /* We log the task\n * We give it the info j which is the number that it is adding\n */\n log_task(&l, \"Sum\", j, omp_get_thread_num(), sum, (void*) &d);\n } #pragma omp parallel for schedule(static) reduction (+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c", "omp_pragma_line": "#pragma omp parallel for schedule(dynamic) reduction (+:s)", "context_chars": 100, "text": "or_static.svg\", 1);\n // And we free the list of tasks\n l = task_list_init();\n\n\n s = 0;\n for (int j = 0; j < N; j++) {\n struct data d = {j, &s};\n log_task(&l, \"Sum\", j, omp_get_thread_num(), sum, (void*) &d);\n\n } #pragma omp parallel for schedule(dynamic) reduction (+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c", "omp_pragma_line": "#pragma omp parallel for schedule(guided) reduction (+:s)", "context_chars": 100, "text": "*) &d);\n\n }\n tasks_to_svg(l, \"for_dynamic.svg\", 1);\n l = task_list_init();\n\n s = 0;\n for (int j = 0; j < N; j++) {\n struct data d = {j, &s};\n log_task(&l, \"Sum\", j, omp_get_thread_num(), sum, (void*) &d);\n\n } #pragma omp parallel for schedule(guided) reduction (+:s)"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abagali1/mandelbrot/parallel/mandelbrot_openmp.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(*colors)[X][3] = malloc(sizeof(uchar[Y][X][3]));\n Color* palette = 
make_palette(MAX_ITER);\n\n for(int Py = 0; Py < Y; Py++){\n for(int Px = 0; Px < X; Px++){\n Color c = mandelbrot(Px, Py, palette);\n colors[Py][Px][0] = c.r;\n colors[Py][Px][1] = c.g;\n colors[Py][Px][2] = c.b;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abagali1/mandelbrot/parallel/mandelbrot_cuda.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(*colors)[X][3] = malloc(sizeof(uchar[Y][X][3]));\n Color* palette = make_palette(MAX_ITER);\n\n for(int Py = 0; Py < Y; Py++){\n for(int Px = 0; Px < X; Px++){\n Color c = mandelbrot(Px, Py, palette);\n colors[Py][Px][0] = c.r;\n colors[Py][Px][1] = c.g;\n colors[Py][Px][2] = c.b;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "NULL) {\n printf(\"ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\\n\");\n return (NULL);\n }\n for (i = 0; i < m; i++) {\n new[i] = alloc_1d_dbl(n);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "(n);\n }\n\n return (new);\n}\n\n\nvoid bpnn_randomize_weights(double **w,int m,int n)\n\n{\n int i, j;\n\n for (i = 0; i <= m; i++) {\n for (j = 0; j <= n; j++) {\n w[i][j] = dpn1();\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": " w[i][j] = dpn1();\n }\n }\n}\n\n\nvoid bpnn_zero_weights(double **w,int m,int n)\n{\n int i, j;\n\n for (i = 0; i <= m; i++) {\n for (j = 0; j <= n; j++) {\n w[i][j] = 0.0;\n }\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "e((char *) net->hidden_delta);\n free((char *) net->output_delta);\n free((char *) net->target);\n\n for (i = 0; i <= n1; i++) {\n free((char *) net->input_weights[i]);\n free((char *) net->input_prev_weights[i]);\n } #pragma omp parallel for"} {"filename": "/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c", "omp_pragma_line": "#pragma omp parallel for", "context_chars": 100, "text": "_weights[i]);\n }\n free((char *) net->input_weights);\n free((char *) net->input_prev_weights);\n\n for (i = 0; i <= n2; i++) {\n free((char *) net->hidden_weights[i]);\n free((char *) net->hidden_prev_weights[i]);\n } #pragma omp parallel for"}