author | CoprDistGit <infra@openeuler.org> | 2025-02-28 10:03:49 +0000
---|---|---
committer | CoprDistGit <infra@openeuler.org> | 2025-02-28 10:03:49 +0000
commit | 73127104a245052cd5cf29cdaaca3e5c32c70348 (patch) |
tree | 8e28b63e478c43c252f18b49836dff7313affe54 /0316-Use-ai-ability-to-guide-optimization.patch |
parent | 49d3feaf4665cdb07576fc1a2382a4d82a612d35 (diff) |
automatic import of gcc (openeuler24.03_LTS_SP1)
Diffstat (limited to '0316-Use-ai-ability-to-guide-optimization.patch')
-rw-r--r-- | 0316-Use-ai-ability-to-guide-optimization.patch | 741
1 file changed, 741 insertions, 0 deletions
diff --git a/0316-Use-ai-ability-to-guide-optimization.patch b/0316-Use-ai-ability-to-guide-optimization.patch new file mode 100644 index 0000000..40b9786 --- /dev/null +++ b/0316-Use-ai-ability-to-guide-optimization.patch @@ -0,0 +1,741 @@ +From 0b85ab4639e2d25314175962a6e41a841649b028 Mon Sep 17 00:00:00 2001 +From: zhenyu zhao <zhaozhenyu17@huawei.com> +Date: Sun, 24 Nov 2024 17:29:13 +0800 +Subject: [PATCH 3/5] Use ai ability to guide optimization. + +--- + gcc/Makefile.in | 8 +- + gcc/ai4c-infer.cc | 457 ++++++++++++++++++++++++++++++++++ + gcc/ai4c-infer.h | 29 +++ + gcc/config/aarch64/aarch64.cc | 14 +- + gcc/gcc.cc | 32 +++ + gcc/gcc.h | 1 + + gcc/ipa-hardware-detection.cc | 6 +- + gcc/onnx.fdata | 1 + + gcc/opts-global.cc | 10 + + 9 files changed, 550 insertions(+), 8 deletions(-) + create mode 100644 gcc/ai4c-infer.cc + create mode 100644 gcc/ai4c-infer.h + create mode 100644 gcc/onnx.fdata + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index bb6197a8e..6315462aa 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -1734,13 +1734,13 @@ OBJS-libcommon = diagnostic-spec.o diagnostic.o diagnostic-color.o \ + pretty-print.o intl.o \ + sbitmap.o \ + vec.o input.o hash-table.o ggc-none.o memory-block.o \ +- selftest.o selftest-diagnostic.o sort.o ++ ai4c-infer.o selftest.o selftest-diagnostic.o sort.o + + # Objects in libcommon-target.a, used by drivers and by the core + # compiler and containing target-dependent code. + OBJS-libcommon-target = $(common_out_object_file) prefix.o \ + opts.o opts-common.o options.o vec.o hooks.o common/common-targhooks.o \ +- hash-table.o file-find.o spellcheck.o selftest.o opt-suggestions.o ++ hash-table.o file-find.o spellcheck.o ai4c-infer.o selftest.o opt-suggestions.o + + # This lists all host objects for the front ends. + ALL_HOST_FRONTEND_OBJS = $(foreach v,$(CONFIG_LANGUAGES),$($(v)_OBJS)) +@@ -2256,7 +2256,7 @@ gcc-nm.cc: gcc-ar.cc + cp $^ $@ + + COLLECT2_OBJS = collect2.o collect2-aix.o vec.o ggc-none.o \ +- collect-utils.o file-find.o hash-table.o selftest.o ++ collect-utils.o file-find.o hash-table.o ai4c-infer.o selftest.o + COLLECT2_LIBS = @COLLECT2_LIBS@ + collect2$(exeext): $(COLLECT2_OBJS) $(LIBDEPS) + # Don't try modifying collect2 (aka ld) in place--it might be linking this. +@@ -3720,6 +3720,8 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype + + # Install the compiler executables built during cross compilation. + install-common: native lang.install-common installdirs ++ rm -f $(DESTDIR)$(libexecdir)/onnx.fdata ++ cp $(srcdir)/onnx.fdata $(DESTDIR)$(libexecsubdir)/onnx.fdata + for file in $(COMPILERS); do \ + if [ -f $$file ] ; then \ + rm -f $(DESTDIR)$(libexecsubdir)/$$file; \ +diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc +new file mode 100644 +index 000000000..99f7a6b45 +--- /dev/null ++++ b/gcc/ai4c-infer.cc +@@ -0,0 +1,457 @@ ++/* Lightweight AI Inference Framework. ++ Copyright (C) 2024-2024 Free Software Foundation, Inc. ++This file is part of GCC. ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. 
++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++<http://www.gnu.org/licenses/>. */ ++ ++#include <unistd.h> ++#include <math.h> ++#include <cstring> ++#include <cstdio> ++#include <cstdlib> ++#include <stdio.h> ++#include <stdint.h> ++#include <stdlib.h> ++#include "ai4c-infer.h" ++#include "config.h" ++#include "system.h" ++ ++#define M_MODE_SIZE 6 ++#define NATIVE_TUNE_SIZE 128 ++#define CATS_STRINGS_ROW 12 ++#define CATS_STRINGS_COL 65 ++#define OFFSET_ROW 6 ++#define SCALE_ROW 6 ++#define UNITY_ROW 1 ++#define COEFFICIENT_ROW 18 ++#define COEFFICIENT_COL 100 ++#define COEFFICIENT1_ROW 100 ++#define COEFFICIENT1_COL 1 ++#define INTERCEPTS_ROW 100 ++#define INTERCEPTS1_ROW 1 ++ ++/* Model info. */ ++static int64_t argv_hw1[M_MODE_SIZE]; ++static char native_tune[NATIVE_TUNE_SIZE]; ++ ++/* Intermediate computation results from the ONNX model. */ ++static char cats_strings[CATS_STRINGS_ROW][CATS_STRINGS_COL]; ++static float offset[OFFSET_ROW]; ++static float scale[SCALE_ROW]; ++static float unity[UNITY_ROW]; ++static float coefficient[COEFFICIENT_ROW][COEFFICIENT_COL]; ++static float coefficient1[COEFFICIENT1_ROW][COEFFICIENT1_COL]; ++static float intercepts[INTERCEPTS_ROW]; ++static float intercepts1[INTERCEPTS1_ROW]; ++ ++/* Model result. */ ++static int64_t initialized; ++static int64_t optimize_result; ++ ++void ++prepare_native_tune_str (const char *info) ++{ ++ gcc_assert (strlen (info) < NATIVE_TUNE_SIZE); ++ if (info) ++ strcpy (native_tune, info); ++ return; ++} ++ ++void ++set_cache_info (int prefetches, int l1_cache_size, ++ int l1_cache_line_size, int l2_cache_size, ++ int prefetch_latency, int prefetch_distance_factor) ++{ ++ gcc_assert (5 < M_MODE_SIZE); ++ argv_hw1[0] = prefetches; ++ argv_hw1[1] = l1_cache_size; ++ argv_hw1[2] = l1_cache_line_size; ++ argv_hw1[3] = l2_cache_size; ++ argv_hw1[4] = prefetch_latency; ++ argv_hw1[5] = prefetch_distance_factor; ++} ++ ++/* Read float from onnx.fdata. */ ++ ++float static ++read_float_from_file (FILE* file) ++{ ++ char hex_float[8]; ++ float result; ++ ++ if (!file) ++ { ++ perror ("Can not open file."); ++ return result; ++ } ++ ++ if (fscanf (file, "%8s", hex_float) != 1) ++ { ++ perror ("Can not read hex from onnx.fdata."); ++ return result; ++ } ++ ++ unsigned char bytes[4]; ++ for (int i = 0; i < 4; i++) ++ { ++ sscanf(hex_float + 2 * i, "%2hhx", &bytes[i]); ++ } ++ ++ memcpy(&result, bytes, sizeof(float)); ++ return result; ++} ++ ++/* To read model parameter information from onnx.fdata and store it into the ++ appropriate arrays. */ ++ ++static void ++fill_node (const char *file_name) ++{ ++ FILE *file = fopen (file_name, "rb"); ++ ++ if (!file) ++ { ++ perror ("Can not open file."); ++ return; ++ } ++ ++ /* Read cats_strings from onnx.fdata. */ ++ char hex_string[2]; ++ for (int i = 0; i < CATS_STRINGS_ROW; i++) ++ { ++ for (int j = 0; j < CATS_STRINGS_COL - 1; j++) ++ { ++ if (fscanf(file, "%2s", hex_string) != 1) ++ { ++ perror ("Can not read cats_strings from onnx.fdata."); ++ return; ++ } ++ cats_strings[i][j] = (unsigned char)strtol(hex_string, NULL, 16); ++ } ++ cats_strings[i][CATS_STRINGS_COL - 1] = '\0'; ++ } ++ ++ /* Read offset from onnx.fdata. */ ++ for (int i = 0; i < OFFSET_ROW; i++) ++ { ++ float result = read_float_from_file (file); ++ offset[i] = result; ++ } ++ ++ /* Read scale from onnx.fdata. 
*/ ++ for (int i = 0; i < SCALE_ROW; i++) ++ { ++ float result = read_float_from_file (file); ++ scale[i] = result; ++ } ++ ++ /* Read coefficient from onnx.fdata. */ ++ for (int i = 0; i < COEFFICIENT_ROW; i++) ++ for (int j = 0; j < COEFFICIENT_COL; j++) ++ { ++ float result = read_float_from_file (file); ++ coefficient[i][j] = result; ++ } ++ ++ /* Read coefficient1 from onnx.fdata. */ ++ for (int i = 0; i < COEFFICIENT1_ROW; i++) ++ for (int j = 0; j < COEFFICIENT1_COL; j++) ++ { ++ float result = read_float_from_file (file); ++ coefficient1[i][j] = result; ++ } ++ ++ /* Read intercepts from onnx.fdata. */ ++ for (int i = 0; i < INTERCEPTS_ROW; i++) ++ { ++ float result = read_float_from_file (file); ++ intercepts[i] = result; ++ } ++ ++ /* Read intercepts1 from onnx.fdata. */ ++ for (int i = 0; i < INTERCEPTS1_ROW; i++) ++ { ++ float result = read_float_from_file (file); ++ intercepts1[i] = result; ++ } ++ ++ /* Read unity from onnx.fdata. */ ++ for (int i = 0; i < UNITY_ROW; i++) ++ { ++ float result = read_float_from_file (file); ++ unity[i] = result; ++ } ++ ++ fclose (file); ++ return; ++} ++ ++static void ++matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out) ++{ ++ for (int i = 0; i < m; i++) ++ { ++ for (int j = 0; j < n; j++) ++ { ++ out[i * n + j] = 0.0f; ++ for (int p = 0; p < k; p++) ++ { ++ out[i * n + j] += lhs[i * k + p] * rhs[p * n + j]; ++ } ++ } ++ } ++} ++ ++static void ++add (const float *lhs, const float *rhs, int length, float *out) ++{ ++ for (int i = 0; i < length; i++) ++ { ++ out[i] = lhs[i] + rhs[i]; ++ } ++} ++ ++static void ++sub (const float *lhs, const float *rhs, int length, float *out) ++{ ++ for (int i = 0; i < length; i++) ++ { ++ out[i] = lhs[i] - rhs[i]; ++ } ++} ++ ++static void ++sigmoid (const float *in, int length, float *out) ++{ ++ for (int i = 0; i < length; i++) ++ { ++ out[i] = 1.0f / (1.0f + expf (-in[i])); ++ } ++} ++ ++static void ++relu (const float *data, int length, float *out) ++{ ++ for (int i = 0; i < length; i++) ++ { ++ if (data[i] < 0) ++ { ++ out[i] = 0; ++ } ++ else ++ { ++ out[i] = data[i]; ++ } ++ } ++} ++ ++static void ++line_concat (const float *in, int in_size, float *out, int out_size) ++{ ++ for (int i = 0; i < in_size; i++) ++ out[out_size + i] = in[i]; ++} ++ ++static void ++one_hot_encoder (const char *in, const char (*cats)[65], float *out, ++ int out_size) ++{ ++ for (int i = 0; i < out_size; i++) ++ { ++ if (i < out_size && strcmp (cats[i], in) == 0) ++ { ++ out[i] = 1.0f; ++ } ++ else ++ { ++ out[i] = 0.0f; ++ } ++ } ++} ++ ++static void ++imputer (const int64_t *in, int size, float *out) ++{ ++ for (int i = 0; i < size; i++) ++ out[i] = in[i] * 1.0f; ++} ++ ++static void ++scaler (const float *in, const float *offset, const float *scale, int size, ++ float *out) ++{ ++ for (int i = 0; i < size; i++) ++ out[i] = (in[i] - offset[i]) * scale[i]; ++} ++ ++static int ++argmax (const float *in, int in_size) ++{ ++ int out_idx = 0; ++ for (int i = 0; i < in_size; i++) ++ { ++ if (in[i] > in[out_idx]) ++ out_idx = i; ++ } ++ return out_idx; ++} ++ ++static void ++preprocess (int argc, int64_t *argv, int64_t *in_modes) ++{ ++ int default_int_val= 0; ++ for (int i = 0; i < argc && i < M_MODE_SIZE; i++) ++ { ++ if (i < argc) ++ { ++ in_modes[i] = argv[i]; ++ } ++ else ++ { ++ in_modes[i] = default_int_val; ++ } ++ } ++} ++ ++/* The process of model inference. 
*/ ++static int ++graph_infer (int argc, const char *argv, int argc2, int64_t *argv2) ++{ ++ const char *file_name = getenv ("GCC_AI4C_ONNX_FDATA"); ++ ++ if (access (file_name, F_OK) == 0) ++ { ++ fill_node (file_name); ++ } ++ else ++ { ++ return 0; ++ } ++ ++ int64_t in_modes[M_MODE_SIZE]; ++ ++ preprocess (argc2, argv2, in_modes); ++ ++ /* concat_result and encoder_out are intermediate computation results from ++ the ONNX model. concat_result is a 1 × 18 matrix, and encoder_out is a ++ 1 × 12 matrix. */ ++ ++ const int concat_out_size = 18; ++ float concat_result[concat_out_size]; ++ const int encoder_out_size = 12; ++ float encoder_out[encoder_out_size]; ++ ++ one_hot_encoder (argv, cats_strings, encoder_out, encoder_out_size); ++ ++ line_concat (encoder_out, encoder_out_size, concat_result, 0); ++ ++ float variable[M_MODE_SIZE]; ++ imputer (in_modes, M_MODE_SIZE, variable); ++ ++ float variable1[M_MODE_SIZE]; ++ scaler (variable, offset, scale, M_MODE_SIZE, variable1); ++ float transformed_column[concat_out_size + M_MODE_SIZE]; ++ line_concat (variable1, M_MODE_SIZE, transformed_column, 0); ++ line_concat (concat_result, concat_out_size, transformed_column, 6); ++ ++ /* This requires performing matrix multiplication between a 1 × 18 matrix ++ and an 18 × 100 matrix */ ++ ++ const int m = 1, k = 18, n = 100; ++ float mul_result[n]; ++ matmul (transformed_column, coefficient[0], m, k, n, mul_result); ++ ++ float add_result[n]; ++ add (mul_result, intercepts, n, add_result); ++ ++ float next_activations[n]; ++ relu (add_result, n, next_activations); ++ ++ /* This requires performing matrix multiplication between a 1 × 100 matrix ++ and an 100 × 1 matrix */ ++ ++ const int m2 = 1, k2 = 100, n2 = 1; ++ float mul_result1[n2]; ++ matmul (next_activations, coefficient1[0], m2, k2, n2, mul_result1); ++ ++ float add_result1[n2]; ++ add (mul_result1, intercepts1, n2, add_result1); ++ ++ float out_activations_result[n2]; ++ sigmoid (add_result1, n2, out_activations_result); ++ ++ float negative_class_proba[n2]; ++ sub (unity, out_activations_result, n2, negative_class_proba); ++ const int prob_size = n2 + n2; ++ float probabilities[prob_size]; ++ line_concat (negative_class_proba, n2, probabilities, 0); ++ line_concat (out_activations_result, n2, probabilities, n2); ++ ++ int argmax_output = argmax (probabilities, prob_size); ++ return argmax_output; ++} ++ ++void execute_sha256 (const char *input, char *output, size_t output_size) ++{ ++ char command[256]; ++ snprintf (command, sizeof (command), "echo -n \"%s\" | sha256sum", input); ++ ++ FILE *pipe = popen (command, "r"); ++ if (pipe == NULL) ++ { ++ perror ("Failed to run command."); ++ return; ++ } ++ ++ fgets (output, output_size, pipe); ++ pclose (pipe); ++} ++ ++int ++get_optimize_decision_from_ai4c () ++{ ++ if (initialized== 1) ++ { ++ return optimize_result; ++ } ++ if (native_tune && (strchr (native_tune, '+') != NULL)) ++ { ++ char hash[65]; ++ char input[64]; ++ const char prefix = '='; ++ const char *start = strchr (native_tune, prefix); ++ if (start) ++ { ++ start += 1; ++ const char *end = strchr (start, '+'); ++ if (!end) ++ { ++ end = native_tune + strlen (native_tune); ++ } ++ size_t len = end - start; ++ if (len >= sizeof (input)) ++ len = sizeof (input) - 1; ++ strncpy (input, start, len); ++ input[len] = '\0'; ++ } ++ else ++ input[0] = '\0'; ++ ++ execute_sha256 (input, hash, sizeof (hash)); ++ optimize_result = graph_infer (1, hash, M_MODE_SIZE, argv_hw1); ++ initialized = 1; ++ if (optimize_result == 1) ++ setenv 
("AI_GUIDED", "1", 1); ++ } ++ return optimize_result; ++} +diff --git a/gcc/ai4c-infer.h b/gcc/ai4c-infer.h +new file mode 100644 +index 000000000..7fb75900b +--- /dev/null ++++ b/gcc/ai4c-infer.h +@@ -0,0 +1,29 @@ ++/* Lightweight AI Inference Framework. ++ ++ Copyright (C) 2024-2024 Free Software Foundation, Inc. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it under ++ the terms of the GNU General Public License as published by the Free ++ Software Foundation; either version 3, or (at your option) any later ++ version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with GCC; see the file COPYING3. If not see ++ <http://www.gnu.org/licenses/>. */ ++ ++#ifndef AI4C_INFER_H ++#define AI4C_INFER_H ++ ++extern int get_optimize_decision_from_ai4c (); ++extern void set_cache_info (int prefetches, int l1_cache_size, ++ int l1_cache_line_size, int l2_cache_size, ++ int prefetch_latency, int prefetch_distance_factor); ++extern void prepare_native_tune_str (const char *info); ++#endif /* AI4C_INFER_H */ +\ No newline at end of file +diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index 08a43541e..1d479f270 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -18764,12 +18764,14 @@ override_C_optimize_options (struct gcc_options *opts) + opts->x_flag_ipa_struct_reorg = 6; + opts->x_struct_layout_optimize_level = 6; + opts->x_flag_gnu89_inline = 1; +- opts->x_flag_ccmp2 = 1; +- opts->x_flag_array_widen_compare = 1; + opts->x_flag_convert_minmax = 1; + opts->x_flag_tree_slp_transpose_vectorize = 1; + opts->x_param_max_inline_insns_auto = 64; + opts->x_param_inline_unit_growth = 96; ++ opts->x_param_pointer_compression_size = 16; ++ opts->x_semi_relayout_level = 14; ++ opts->x_flag_ipa_prefetch = 1; ++ opts->x_flag_ipa_ic = 1; + } + + /* Check whether in CPP language or LTO with only CPP language. */ +@@ -18826,6 +18828,8 @@ override_optimize_options_1 (struct gcc_options *opts) + opts->x_param_ifcvt_allow_register_renaming = 2; + opts->x_param_max_rtl_if_conversion_unpredictable_cost = 48; + opts->x_param_max_rtl_if_conversion_predictable_cost = 48; ++ opts->x_flag_ccmp2 = 1; ++ opts->x_flag_array_widen_compare = 1; + } + + static void +@@ -18848,6 +18852,8 @@ override_Fortran_optimize_options (struct gcc_options *opts) + opts->x_flag_reorder_blocks = 1; + opts->x_flag_crypto_accel_aes = 1; + opts->x_param_flexible_seg_len = 1; ++ opts->x_flag_alias_analysis_expand_ssa = 1; ++ opts->x_flag_chrec_mul_fold_strict_overflow = 1; + } + + /* Reset the optimize option. 
+@@ -18857,7 +18863,9 @@ static void + reset_machine_option (struct gcc_options *opts) + { + if (!(opts->x_optimize_maximum) +- || strstr (opts->x_aarch64_tune_string, "hip09") == NULL) ++ || opts->x_aarch64_cpu_string == NULL ++ || (strstr (opts->x_aarch64_cpu_string, "tsv110") == NULL ++ && strstr (opts->x_aarch64_cpu_string, "hip09") == NULL)) + { + return; + } +diff --git a/gcc/gcc.cc b/gcc/gcc.cc +index 32e45adc2..4592a4ec8 100644 +--- a/gcc/gcc.cc ++++ b/gcc/gcc.cc +@@ -5798,6 +5798,9 @@ do_self_spec (const char *spec) + do_spec_2 (spec, NULL); + do_spec_1 (" ", 0, NULL); + ++ const char* tune_native = eval_spec_function ("local_cpu_detect", "cpu", ""); ++ setenv ("GCC_AI4C_TUNE_INFO", tune_native, 1); ++ + /* Mark %<S switches processed by do_self_spec to be ignored permanently. + do_self_specs adds the replacements to switches array, so it shouldn't + be processed afterwards. */ +@@ -8121,6 +8124,7 @@ driver::main (int argc, char **argv) + putenv_COLLECT_AS_OPTIONS (assembler_options); + putenv_COLLECT_GCC (argv[0]); + maybe_putenv_COLLECT_LTO_WRAPPER (); ++ putenv_ONNX_FDATA (); + maybe_putenv_OFFLOAD_TARGETS (); + handle_unrecognized_options (); + +@@ -8551,6 +8555,34 @@ driver::putenv_COLLECT_GCC (const char *argv0) const + xputenv (XOBFINISH (&collect_obstack, char *)); + } + ++/* Set up to remember the pathname of the onnx.fdata. */ ++ ++void ++driver::putenv_ONNX_FDATA () const ++{ ++ char *lto_wrapper_file; ++ lto_wrapper_file = find_a_program ("lto-wrapper"); ++ ++ if (lto_wrapper_file) ++ { ++ lto_wrapper_file = convert_white_space (lto_wrapper_file); ++ char native_file[512]; ++ const char *onnx_fdata = "onnx.fdata"; ++ strncpy (native_file, lto_wrapper_file, sizeof (native_file) - 1); ++ native_file[sizeof (native_file) - 1] = '\0'; ++ char *last_slash = strrchr (native_file, '/'); ++ if (last_slash) ++ strcpy (last_slash + 1, onnx_fdata); ++ obstack_init (&collect_obstack); ++ obstack_grow (&collect_obstack, "GCC_AI4C_ONNX_FDATA=", ++ sizeof ("GCC_AI4C_ONNX_FDATA=") - 1); ++ obstack_grow (&collect_obstack, native_file, ++ strlen ( native_file) + 1); ++ xputenv (XOBFINISH (&collect_obstack, char *)); ++ } ++ ++} ++ + /* Set up to remember the pathname of the lto wrapper. */ + + void +diff --git a/gcc/gcc.h b/gcc/gcc.h +index 63231ddb3..ff3ae8bed 100644 +--- a/gcc/gcc.h ++++ b/gcc/gcc.h +@@ -44,6 +44,7 @@ class driver + void set_up_specs () const; + void putenv_COLLECT_GCC (const char *argv0) const; + void maybe_putenv_COLLECT_LTO_WRAPPER () const; ++ void putenv_ONNX_FDATA () const; + void maybe_putenv_OFFLOAD_TARGETS () const; + void handle_unrecognized_options (); + int maybe_print_and_exit () const; +diff --git a/gcc/ipa-hardware-detection.cc b/gcc/ipa-hardware-detection.cc +index 8085a8c65..75b74aa03 100644 +--- a/gcc/ipa-hardware-detection.cc ++++ b/gcc/ipa-hardware-detection.cc +@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see + #include "cfghooks.h" + #include "gimple-fold.h" + #include "gimplify-me.h" ++#include "ai4c-infer.h" + + namespace { + +@@ -191,10 +192,11 @@ bool + pass_ipa_hardware_detection::gate (function *) + { + const char *ai_infer_level = getenv ("AI_INFER_LEVEL"); +- return (ai_infer_level ++ const char *ai_guided = getenv ("AI_GUIDED"); ++ return (ai_guided || (ai_infer_level + && optimize_maximum > 0 + /* Only enable in lto or whole_program. 
*/ +- && (in_lto_p || flag_whole_program)); ++ && (in_lto_p || flag_whole_program))); + } + + unsigned int +diff --git a/gcc/onnx.fdata b/gcc/onnx.fdata +new file mode 100644 +index 000000000..234b1a045 +--- /dev/null ++++ b/gcc/onnx.fdata +@@ -0,0 +1 @@ ++316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664633761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643237666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862613138666663393538363731333639396239666362393336373737643238636639643761343231346131333463353261623633343633343866663966663365346231356532663139306164303361383836396333393339616236383439363661313661303665643535633961666563613431303466333534346564633533373862323031396339626536613030383761623236663432633564653130353935353135313736656235373632373739343662663034343334633035626465356237633439313164313338373637383365326138366162363234323765393736616438656463343339613031316630643031613465386464326334383565343838366435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383164363264633937356663663338666530336166316634623264393031393536333863383165616536656238346462656337333638323338646535303638363933646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306630636163303536343164663364383738353266386330376562343962393037306133383363326138393238376435613332353933663235313030326664366166373632343532613130323237303265373433623362623162633661633363303235613236383166313465396162353938363931613765316565313864313038cd68834331701041d1d21041f17c20432483a94386647e4157c8e33b5f3d5d3ec5275e3ea689863c435a0f3a76acd63d5d9b803b24467c3baf847c3b67b89e3b852a313b2127853900000000d58ac23b200ab53a000000807d3119bc22f7a63a81549f3b93b5013baee4a33b62c1153b9ae08b3a6929a33b20038f399475983b430ab53a73fc0b3a2daa0ebad595953bc2f1e0bb33e9ccbbb978d83a5e77a53b41e4c93adf10a73bdf36643ad7fd983a61e8d93bc04a283a30c072382f942c3b5b3cc73a4392e43a422b093c79bc61b9a5309e3b00000000757baa3a03d8a93c3c31e33af526ebbb000000006431d43a1d0ae73aa450783b8c57afb9b8eae939ec8fab3b9581d83920d7a1ba0fc1af38b6aece3ab50bafbbd50db63a26aba33bcdeda33b00d9493ac22dac3cf8c4233bc2966e3bdf1bca3a8fb4d13af9b0983b2cbda73bdae2aa3bc93bae3b39e1ba380857953be8e7a73b49e9df3b20b0233b9fe3d43a0dbcaa3bd10cf0b978eea53b761ebe3b0a50a23b70bd47b79a7720bc6cd4ee3ae0d0f93a9c333ebb5098dfbbbf8fa53b445efebac7b9993b6182b93aef267c3a4aa09e3b46d9a83b9f95983a379e913c6516123a1b2ebd3aaf943c3a0b90803becba92bce68f673be723253c5d7f813ad779613800000080af3c65ba6999743900000080957a003d82f2fe39baab4d3b7f348c39b8d3323b3c1e253ace952dbbc9d364bc3aafaf373d0a633be8fdee3968b0fa39eb70a83a7cba4e3bdf2407bc40f50f3d94f4c3b9a828573b3f2bc3b99a5763bcccb838bb24f011bae3400dbdc3074fba30a829bb3dde6e3ad7c2caba2b2aa7b8d479a7bbebe2603a7025583b00000000017414ba680386bc9b365e3aaacb03bc000000006afd90b9a64e263980eb223c80a48ebcca9703392310573b1fd419bbf7368abc17a2083a3ceafab95eb11cbcf29995b9a64264bc8bae403bc1dc6139631c88bc12e3373c07cf0c3cdc93a6b97edbc0b917754d3b5cdc143c61ef393b40a809baf3861dbbafce623be550513b828382bc359d513afa4a25ba31394c3bb013da3a9835553bf3d9553bec2b65bcee09bab9f6343e3c03a59f39fb11053a078e7cbc5bd006bcfe23363b08d12cbb3cfb533bb98a8fbadcb99139cbd1573b24725e3b01014fb6dcbc45ba6ee024bb318db1baf39ce9b952d625bc41afddb91d7dffbbc0ba163b0387b93b2594623b00000000f60cf9ba48
3c983b0000008015e6c6bcbd45983b77d62ebcfbb69f3b7b5752bcc334ab3b4f9806bc9d89063cc0675a3b807426bca81a9f3b7ef56f3b6a96a13a045937bcd4a2f33cb92173bc40af783b26ac40bc5fef6b3beba6fe3b8c7207bc5e25443bfd99a33be7e7403b4c2508bc0c87bb3bb95dcd3abe228b3bac03deb91a2ab03add753bbc000000002e04703be98f1fbccef2af3b17ebe93c0000000020e37a3b46ba913b1fd7003b1f3f133df85d423bacc843bc5fada7bbc8680d3d8423503b2afc6c3b4e43033dcfcc7c3bcece053cdbb44ebc4151823ba14426bc6e942c3b3bdc4d3a34967f3b7687783bd0cd3ebcfc75053ade324ebcd10c32bc9ff9fbbb0b7430bcf60e4abcd6b6e03b295db43b25c75d3b88334fbc8d95883ac9c73ebcddf941bc2b18083c43044c3b405414bd7617963b9910a03bd5e70c3d9356f23c3a2750bc472107bce47d47bc0125243b3c41953b0f6134bc8c403bbc8fb3873ba5e218bcae5d06bc2dfe103b758a493b43cef63cd7438d3c2bf1eb3b2d4a833cf13a43bc5d14c4bd000000002932a7bc3191e4bb00000080224e753dea87dfbb41e28a3cbeb44b3d731d8f3c1312d2bb54e44dbc232b84bc74f9d9bd033bcb3cdda410bbeeeb47bdd7e44e3b3c3e21bb435712bdb3e6413c82e770393f20a53cc6642dbc325484bc410c4e3dcb49823dc262bd3c204a563d032393bb0887753c0cad943d3946abbbcb77b3bc9151c6ba860dc0bd00000080e5880d3a2f960ebd1bba99bcce3910bd0000008037acde3be98a983bd60b7c3c66ee27bd2431aab98b2b95bded06813bc17429bdf5a9e9bc4ff297bafad924bdc14d53bc901784bcad96073cd34989bc84580fbd1e276b3ca48e513c189796bbe15f8cbb39fa473cce9c693cbdd0843a4f07443dbf40c03c38a1893c3790ab3cb48c58bcc5e9863b684448bcb5c32abc0726a6ba1def9ebb57ce273d772b84bc1925c63d2e26d8bc24460cbcb0f807bc8dd0a5bc9ba312bd6ed5393c32e1f43cf3c58bbc8a5334bc8e0c53bbf78cb13c7805793c8d5800bbc4a5c2bcfc2c85bba79d3c3df00f493db55cb73cce71c43dc030f03cc953823c79c1f13d614db73d0000000074e98f3ca415183c000000801104803d9afea83ddf9ff93d835f9bbce8d8623cd67e093c453d143d7d8c90bc1434e23d24580b3e00711d3c729b903d81a0253c82e9b53cba65123ca564a23d7a53003c2c82ec3de139f93c58f58ebce101813ba5782d3d4e198e3dbaa40fbb58e2bc3bbf92943c98421e3df32c0c3cbc235ebcc2fe443c2789033e00000080b94ca73be81815bd1758e53c5df7053b00000080f9f63f3cc7a9893cb846823c65d2143c9bb50e3cced60c3e92fb983b583593bbbbfe263e390bdf3b696887bbd13e823c207890bc1cf0c23cd688163dd14e16bdd3cb813c95a6593c70d7083cd6c6e43b6d4d9b3c9455683c876e1f3e599ff83c4b377f3c2afd953cbeedd43ccbdb163d2d78fd3bcc84363c5c7fa63c22fedf3c3318e83d0ecdba3d0ea690bc462e9a3d0b11013cf19f503da4f8813db249c0bba7300f3c2d6c223dd1d7663b56b4c43d56e5f93c4799e43b0702a73d4e15ae3de8040e3cdfad72bc0ab6593d1fb7c9bb6f90b43dbfcab83b4cd802bbbd3c993be1a91c3c8f677cbaa83420bb0000008084bf263b6336adba00000000373bd13b521cffba733ac83bee8c9bba1306f73bbf5471ba8651773bc863ac3a6ed119bb926fc43b9368e5ba34f319bba9c8ebbaa74acb3b39169ebc812d573b4764beba5815ea3b5211caba956ec23a9e107d3b64dbc4ba674ac73be88107bb5354493b688c5cbaaf4571bab3d6b3bae566603b11b0b0ba6bd1d03b000000005b6cc1ba0720833c5210c7ba85cd97bc000000003c1fc4bab35dbbba30b6fe3b389ea2bc97eb8eba37bae43b697a293b87969abc5c9e04bb83acc2ba5f8fadbcf872c5bab03daf3ad509fe3b2f81f9ba4317863cd808bb3b0177f03b02dabdbab2efbdba3b03d83bc09f223c6030ec3b0137c13b29f5663bf195ce3bc10eff3b18cda93b35486cba7dd7c8ba0d51003c34dc93ba891be33bb785ea3bbb75a73ae04f2abba21d8f3b9065c3ba8892bbbac37d96bc6a0c9dbc596cef3bce5a063bd64cec3be62fb0baaa5cbbba1acbd03b5cdfe13b0f37e9ba48bb653cc513733bc352a0bacba0ffba469ababdf17dae3c939271bd7af718be283c113ed15fbb3d00000080920e90badfa4d63d00000080df70d63c18f5c03dbbb677bde981e83d5a78d2bd0985093e663cffbd9a12803d3fc0b33d65b388bd50a0dd3d011acc3d2df0203ed04095bd5c9a8abd7294323dd404b33dabc5bdbd8042a33d93cb6f3d3b81f7bd4e2e823dfdba273d83ea863d7d0f3cbeaff2133e565a3d3d0d66ca3d035bea398af8073ec3b79abd00000000d078ad3dbe475c3c9267013e1db874bd000000809e1db03d23f9cb3d902b14be16ea52bdc41ac33db52ab
bbdf07981bed9442ebd4d94a83de0f7a93d745d41bd4cebb33d01f57c3da16adbbd8ae0b63d2d6c763cc1f40ebe098cbebddc59b83d256bb13d359fa9bdf886e5bd0dfcbbbdd68d0dbd726807be83579fbd05c9dcbdd8e8983ccd620d3edcf69f3dc980e8bdcb6c923e1cdcb3bdcc6ec1bde600823d030d993d9b3a1d3d420bcd3d754dde3df8132cbdfabb85bdce89c3bdefd054beafc6c8bd931e7c3d30fed83dbef795bd7fb1b3bd6d9eb73de344993cceb603bebd216b3de45c803d70a1953e1dfb62bea91d2a3ed284113f544008bfc50498be00000000d460e13b0d05b2be0000000066f28ebe9adb9ebe4ee6323efd01c7be63c7ac3e888dfabe44e5e23ec6f631bed07890beb0fe4a3e6217babee93daabecf501abfd1b9573e7bd53a3f805fe2be10a48cbecac09a3e52c67bbef48823be9a9dd83eb74340be75deeabd3fbf42be9d1c3c3f74e10abf923b05beaec4a4be66229fbbb35ff6be8a146c3e00000080638d84be04cb14be4dcfe7be53ec253f00000000208c89be7039a8bef1170c3f70ac0d3fdcd59bbe68f4973e6b1f903f16a3eb3e6d0a86be361a81be4cba023f82c88cbe01132ebea3feb73e79f391beeb6826be5130063fc4d9953e4a6690becd328abe13b7803efa6fc23e9823993efea5b83d60e0f43eff276c3ed453ba3e374b63bdd41502bf17e36dbe5034c53ea2ac9dbfff04903e64879e3e4c9d35bede8b6cbe435ed2be4e73abbe3020bfbef8c6e83e8630343ff9de9e3e78d65c3f3659a63eee6f35bed729b4be3954623ef8778f3ef4758fbe75fd4ebed9e3ed3e770a23be6b403ebed7e596bd656a093d6e463cbddfa103be2213f73d8a4c973d0000000050dc0bbb9d64b03d00000080d5e9283d3e429c3d1d0b3ebd35e5c03d101cacbd51b0e83dd890d5bd1378483dcc6a903d431d54bda0cab63d6376a63df1ab0a3e1a586bbd6bcfdbbd5cfb8d3db01a903dced598bdc300823d43fc3a3db75ccebdfea14a3d8f27fe3cf98c533de7d924bef762fc3df248103dde32a53d8d067e3a17bee53d5b6573bd00000080285b8b3d5cd0283d3447d93dd249c3bd00000000317f8d3db47da63df06ffebd933ca7bd3da29e3d0f9496bd7f1c6dbe2feb8abdd761863d8d3f883dc33d9abdecee903dd3ed453dc93cb4bd785f933d08c8393d5eb3f4bdef249bbd4ffb943d98be8e3d7b5487bd365bbebd103397bd19bdd2bcbc53e4bd9eb77cbd7d6fb5bdcc15713c4f00f03d54fc7e3d1c98c0bd05dd853e0dea8fbd5a2d9cbd35aa4b3d879a713df19b783d214da73d821bb73dc52889bdb707d4bdfd3d9ebdb6e53cbe1ee9a2bdc740443d3594b23de3046bbd4acb8fbdbf54943d23de5e3dbd36ddbddae5363d5155473de961aa3ed3a432be5d8e583e1d9a173f318110bfbc47aabe00000000a572363c9755c8be00000000c5235abe14fbb1be3f59573e57f8dbbe0adcc13e40d705bf1517f43eb1d467be07b3a2be40216f3eb6d9cfbe84b1bbbeba571cbf5c30843e96700c3f700eb6beee52a3bedcd6ab3eb39b93be9fd159be7524eb3efdd668beee2a1abe9dd171be0acf3d3f227e12bfbb312dbe3966bbbee3f99bbcec2f01bf2790883e0000008047ae9dbe59a438be26a4f9bedaf7f93e000000004b62a0bea849bdbe00ac123fa905d63e3e96b2bed43da93eef558d3f0c73b23e65bf96be67359abe8cc9c53e372ba4be184265becf66cb3edd11a7bed6d249be506f0d3f7429af3e0ba1a8bed2aca1becacc973e154ad73e9df5a93e53dd003e4cbd023fedc88d3e01e5cc3eff3eb3bd194d0abfa27090beb4a9d93ed0bf9dbf7c95a13e7bb5af3eb12a6bbeace488beb5419fbe86c8bebedea8d1beb735b03e6889073fb9f1b13e34835e3f7d7bb73ef8e961be08c5cabe6ff4833e956fa13e33e0a7befd926ebe6009fe3e8bc653be843b65be6a18a0bdd534f93c01fc52bd6b6f09be77e3fd3dc1c59f3d00000000e97d92bbf664b83d00000000972f193da896a43d65e754bd5e81c83d269cb4bd1d84ef3d1f11ddbd55385e3d020b993d4fdf69bd12a5be3ddd95ae3d62cb0d3e7ff87fbd7418c7bdf8ca803d1ed5983d13dea1bd480c8b3df3be503d2202d6bd540d5e3dc93e133d530d673d1b012bbe8f7c013eb79d243d9072ad3d772a273acfa6ec3d0ce383bd00000000fd56943de9119e3c0f64e03dd0cdb0bd00000080274f963deba6ae3d1c0a05be683297bd1f18a73d49b19fbdc64270be58657bbd263a8f3d374f913d63948bbda1b0993ddab55b3d7485bcbd00fc9b3df3edb03c892c00be9350a6bd25af9d3d968b973d79fe90bdeca4c9bdba46a0bddb1004bdbc84ebbde25788bd3ab0bdbda690963ccab6f63d60cc883d918ec8bd51ba873e9a3e99bdd01da5bde567613d50fd813d6981613d7163af3ddbdcbe3d473578bd8c07c0bd6924a7bd21c53fbe8ca6abbdaaf9573d158dba3d86c37fbd572399bd2a109d3d6994dc3c
1e95e4bd4e1d4b3d7eca5a3d032771bdc8f5403d4b1a09bde53ceabdbfd4db3de573753d000000004909b9ba32b88f3d00000000c1c16c3d8b42803d994910bd8ba8a03dac6f8bbd482cca3db803b7bd3e900f3dbd44693d09b223bd2e39963db96c893d58fbf83dd2092ebd01b411be3149c43d891f633dd5b879bd93444b3d60e4033dfcc3aebd6c231b3de078bd3c57241d3dc16d17bed815e03d92dad63cc208853d82c1823a3acdc63d6d833ebd00000000ad06563de91bf63ca912bb3d854001be00000000941b5e3d5cd1873d744ce1bd91fce3bd57a17b3d804275bd64d465be11f8bdbdae6a583d0b73503dfc7bd1bd7155633d3e6a0c3db57194bd8eae6b3d6798093d58a9d7bdc3db71bdbd2b693d602c5f3d20c14fbd73e59cbdd53e77bdf28694bc1c71c5bd7f773ebd704a96bdd115383cf8ebd13df406403d27019fbde07f823eef7668bd4fcf7fbd6184123d9a113f3ddd37ad3dfe6a8a3dda459a3d3a30bbbdafb00cbe663c80bd0e3832be203886bdf262123d1972913df4a036bd978267bd99a5673d02f7283d6ae8bfbdd981033d3e82193df70172bdf1b33b3d2c8309bdf837ebbd4944dc3d875a763d000000001630b3baef2e903d000000008d6c663da8b4803d58ca10bd9221a13d74e08bbd7fa6ca3d338bb7bd700e103d71236a3dc13924bd00b2963d3de8893d2c6cf93d4ab12ebd894d11be9e58c13d81f0633dae8e7abd69054c3d5b51043d5d42afbdc1b21b3db199bd3cdab61d3d61aa17be7e84e03d2c57d73c907b853d18e27a3ac747c73d073e3fbd0000000013d0563d4ba4ec3cf98ebb3db3d400be0000000068ea5e3df445883d2b22e2bded78e2bd97817c3d232376bdb02f66becefebbbd053d593d6b38513d5d50cfbdab27643d1ce40c3d1ce994bdc0866c3dc167043d7161d8bd9abb72bd8c016a3ddcfa5f3d6e8650bdf66e9dbda51c78bd8b6494bc14dcc5bd07203fbd87b396bd8478363cab61d23dd8be403d137e9fbda19d833edb4969bdf35780bd9506133d4ac83f3d4157a93d2be18a3d44c19a3db553b9bd422c0cbe29ae80bd8b1f32be2ab086bd7fe6123d5ce9913d1b4c37bd253c68bdf37a683dda1d253d1866c0bd1fef033d630f1a3d55a78ebd9363e43c877813bdf06300be01e5f03d82788b3d00000000c3734fba035aa53d00000000e511873d7b68913d4e6e1cbd7718b73d813fa9bdcc68e13d5460cdbdbca5213daa5f843dade935bd383bac3d31919b3d2bf8073e69c848bd95ef2cbe5d65d83dba17833d49ee93bddd1f693de61c123dab7ac5bdd1dd2d3d5cf9643c6e50333d290923bebe38f63d2111e33cba73993d0a4b1c3aff26de3de00c5abd0000008092d2793d9b92fc3c1457d13de3451bbe00000080993e803dcd659b3d9082f7bd691808bef0f6913d51b390bdbab66cbe5acce9bda06e743dd62e733d250efbbd92ab833d46cc1d3d0c53b4bdea20873d8341113dbc72edbda26d8cbd369a873d164c813d395d75bdbe4ab3bd341392bd2d3f81bc05cadcbd939c5ebdc34eb6bd0175de3b5116e93d1b04603d00c2c2bda59a863e93b488bd09f997bda483253d75f4573dfd1ad83d23ed9c3d4bbfad3dc5f9e5bdf05c27be036399bd72e93bbe8204a0bd71d8243d7988a73d352350bdb95188bdcbd0863df73d3e3d0cbed5bd59aa133d40402b3d52f370bd85b63b3d0fd708bd723aeabdcdb1db3d87df743d000000806cedb3bad56b8f3d00000000d46d663d35e87f3d67e50fbd8859a03da22e8bbd43f6c93d37deb6bd811c0f3daeb1683d765223bdb9ef953d1c24893da7e7f83d15a22dbdd3ac11bee609c13d9f83623d703979bd21b74a3d7b7b033def92aebd70b81a3d730bbd3c5cbd1c3d0ecd17bef7f0df3d8140d63cd8b9843d1d757e3ac997c63d11023ebd00000000937d553d0e57ef3c0edaba3dc82f01be000000807c885d3ded82873d247ae1bd3436e3bd170e7b3d5ab774bd32eb68bef817bdbd6edc573ddbec4f3d4721d1bde3c1623d5ffa0b3df23994bd8a176b3d63e0053deec1d7bd7a5971bd5899683d57945e3d72434fbd92a59cbd4c9e76bde50b94bcc364c5bdab1a3ebd841a96bd88b5363cb8bbd13d548a3f3dd1e69ebd72ce833edbe867bd6f517fbda50d123dc88a3e3d8f2ba93dd51b8a3d83fc993d10a3babddc870cbe05e47fbdc04732be17f885bdfdfe113d7b27913dd82436bd100667bddb16673d808c263dbfbcbfbdfa2c033d6118193dc7b077bde7d4433ddcae0dbdc3a7efbd20dfe03dbb5f7c3d00000000603dbdbab68b933d00000080e852703d98cf833d651615bd4fbfa43dce3a8fbdd0edce3d2297bbbdfe4c143dbdf46f3de7d728bd892b9a3d092a8d3d635efe3dce9033bdcff113be186ac83d49a3693d124780bd8e56513d255a083d402eb3bde733203df262c43c6343223d04aa1abe2d29e53d0392de3cb1a9883d8ea7833ab384cb3d0e5744bd000
00000d2555c3d8ed7f63caaa1bf3d462903be000000801f8e643d88808b3d81a9e6bd24a3e8bd4854813dee347cbdeafb6bbe1474c1bd62c75e3d92a6563d5559d5bda6e2693dbb14113dd06d98bd8065723d3e040a3d071cddbdc1d478bd83d76f3d8d9e653d47f855bde307a1bdaf257ebdce119abce91fcabd9f3a44bdee319abd37ff3e3ccecad63d6be6453d101fa3bd177f853e88256fbdab7483bd6e52173d12e6443da4ecaf3d32268e3d324b9e3d0603bfbd44ea0ebe44cb83bdad6235be57ee89bd933e173da14e953dd84a3cbd7c026ebd384d6e3d9d5f2b3de293c4bd3104083dbe891e3d97b16fbd8d903a3dc1e207bd7219eabdc854db3df7f4733d00000000e850b1ba61f98e3d000000005023653d9b007f3dc20d0fbd7beb9f3dfab88abdd293c93d1e74b6bdb4490e3d3bc7673d755122bd3e7e953d89af883dc493f83d8dbc2cbdcd0711be5457c03d389b613d165078bd9bcf493d41ae023d0226aebd2ddc193d9366bb3c94df1b3d008d17be2995df3d3dbcd43c3446843d9367763ad233c63da91b3dbd000000805b93543d7659ea3c8b72ba3d3a4100be00000080009f5c3dc40f873d922ce1bd32cce0bdcc237a3df4cb73bd988d68bef1ddbabdc7f2563d12034f3d497ccebd01d8613dde280b3d17c693bdf72d6a3dd434033d60b6d7bdb06b70bddbae673d96ab5d3d24584ebdc2339cbdb0b675bd189c92bc81e0c4bd3e2b3dbdbd8e95bd1ac1343c405bd13dbca23e3d396f9ebda2b7833e68ff66bd3d687ebd9a39113d03a53d3df37fa83d74a9893df58c993d4e6db8bdf6030cbedcfa7ebd142532be818285bd7426113d02b5903d4d4035bd28f265bdd02b663da385233d2056bfbd115b023db03c183db67f6bbd2238373dd9f904bd22b7e6bd6a08d83dee556f3d00000000ade0acba75698c3d00000000f81e613d4b3c7a3de7e50bbd952b9d3da23788bdd76dc63d2d7cb3bd59220b3d7e4f633d7cf91ebde1db923d2333863dd90ef53dfb1b29bd309d0fbe4f9dbd3d9c365d3d57a273bd67be453d2576ff3c0b43abbde685163d9d25b73c9e81183dc2bb15be5940dc3dd0c4cf3cd1d5813da770743ad315c33df03939bd00000080845d503d6a53e83ccd6fb73da62cfebd00000080394c583db596843d6201debd1f8fdfbd2971753d652e6fbd538966be80adb9bdc0b3523dd9e04a3ddf6dcdbdbf735d3d780f083d5d2a91bd6ead653d6e14023d8e3fd4bd8fda6bbd9f37633dc054593d64394abd3a8099bd5d0f71bd21308fbccde4c1bdbb5139bd100693bdb242303c8a24ce3d22bc3a3dc7bb9bbdb39e823e728962bd89a779bd5f050e3d61c0393db4f7a53d2828873d21de963dd6f4b6bd38820abe1b387abd071b30be521083bd6bf50d3d5f208e3d217c31bd92a961bd07ba613dbd25223d3f48bcbd38d9fe3c41ed143dd523d63e2f30a6be5ce7723ed5b3503f886143bf5a97d9be00000000c1361f3cccf6febe00000000c6a7ccbe1e6ae3be53717f3e548d0ebf056cf73e199933bf669a223feb077ebe93bfcebea700913e864805bf36c6f3be43625dbfe4309a3e1cd6843f05862bbfa03fc9be5676dd3e6810b4be955869be71381b3f575a89be84a727be4f278bbe140b873fb12947bf300e3ebed7eaebbe6f18e1bb9c9930bf87c0a83e00000080a1a9bdbe755354be942526bf6d1c6c3f0000008047d1c4be18e0f0be20e6483f35e1493fbe1adfbe1a73d93e563bcf3fc2fb273f6dc3bfbe63b5b8bec4753a3f2078c9be5a7578bef4c2033f06e3d0beea966dbe5368403f7778d63e0baccebe57bfc5be121fb83eee420b3f4424db3e763c033e51892f3ffbd8a83e686e053fdedaa1bd5d843abfda1faabe45450d3f5488eabf600cce3e4ce3e23ed6a181be523aa9be643b16bf1a7ff5be76e208bf7cdd253f4b25803f6a67e33e789a9e3fe61fee3e8b9781bea90601bfbac0a13ead42cd3e5d54cdbe8fe093be127f2a3fccd368be1ee887be106a0c3eeee1893e82029f3d5725303e5642663f3d48ee3e04d172bdd485bc3d3547ff3ebf8421bebc6bdd3e023dcb3e23feb53e08b1363fd7ad833e92133b3f3091903e0fafc13f51adf03e4b2e0c3e6612e43e92e1dd3e0d091e3fb063833e89ea713e2698003ff850313fb951543ede510b3feed7bd3ff97d843ee29fd73e0161653eeff1ad3e7755773e09023e3ffd34bd3ebdd90e3f0fa2503eee46033feb2b833eef03febd8727d83e8ae5a83f665d0a3f17b35d3ed825d5bba70bf43e5126033f1900253e8a569a3e88b7ba3e7c70703eb6557e3ec9ba7d3e7c13ae3e5830d03ecc7b683e5adeee3ec9c7b83f2190693e9937bc3e9a92b03f24c51b3e101df53d7e77e33eff50233fd3666b3edf57163eee32983e5416253eee1e513e11fe1f3ecdef4e3ef340053e45e4273f99bccd3eb76e623e886d9e3f8fbd5a3eb44f543e27dbc33fd1e4d63eea1a123f46521a3f75a208
3fdbf8533e77cf6c3e0bdb8a3eec4fc83e2d98653ec5310c3f5ec9ea3e3bf2513e3fb13f3e7277c13edbe3bf3ea6c69e3ea869d03eee4ba73eb83d76be0000803f +\ No newline at end of file +diff --git a/gcc/opts-global.cc b/gcc/opts-global.cc +index a18c76940..e684bc5e3 100644 +--- a/gcc/opts-global.cc ++++ b/gcc/opts-global.cc +@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see + #include "attribs.h" + #include "asan.h" + #include "file-prefix-map.h" /* add_*_prefix_map() */ ++#include "ai4c-infer.h" + + typedef const char *const_char_p; /* For DEF_VEC_P. */ + +@@ -304,6 +305,15 @@ decode_options (struct gcc_options *opts, struct gcc_options *opts_set, + location_t loc, diagnostic_context *dc, + void (*target_option_override_hook) (void)) + { ++ set_cache_info (global_options.x_param_simultaneous_prefetches, ++ global_options.x_param_l1_cache_size, ++ global_options.x_param_l1_cache_line_size, ++ global_options.x_param_l2_cache_size, ++ global_options.x_param_prefetch_latency, ++ global_options.x_param_ipa_prefetch_distance_factor); ++ const char *tune_native = getenv ("GCC_AI4C_TUNE_INFO"); ++ prepare_native_tune_str (tune_native); ++ + struct cl_option_handlers handlers; + + unsigned int lang_mask; +-- +2.33.0 + |
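The model parameters shipped in gcc/onnx.fdata are plain hex text: read_float_from_file in the new gcc/ai4c-infer.cc consumes eight hex characters per weight and reinterprets the four bytes as an IEEE-754 float in the host's memory order (little-endian on the AArch64 targets the patch touches). A minimal standalone sketch of that decoding, with a hypothetical helper name that is not part of the patch:

```cpp
#include <cstdio>
#include <cstring>

/* Decode one onnx.fdata token: eight hex characters spelling the four
   raw bytes of a float, in the byte order they occupy in memory.  */
static float
decode_fdata_float (const char *hex8)
{
  unsigned char bytes[4];
  for (int i = 0; i < 4; i++)
    {
      unsigned int b;
      if (sscanf (hex8 + 2 * i, "%2x", &b) != 1)
	return 0.0f;		/* Malformed token: fall back to zero.  */
      bytes[i] = (unsigned char) b;
    }

  float value;
  memcpy (&value, bytes, sizeof value);	/* Reinterpret the raw bytes.  */
  return value;
}

int
main (void)
{
  /* "0000803f" is 3f 80 00 00 when read little-endian, i.e. 1.0f; it is
     also the final token of the onnx.fdata blob added above.  */
  printf ("%f\n", decode_fdata_float ("0000803f"));
  return 0;
}
```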
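Taken together, graph_infer is a small two-layer classifier written out by hand rather than a call into an ONNX runtime: the six scaled cache parameters plus a 12-way one-hot encoding of the SHA-256 of the detected CPU/tune string form an 18-element feature vector, which passes through an 18x100 dense layer with ReLU and a 100x1 dense layer with a sigmoid, and the larger of the two class probabilities {1 - p, p} decides the result. A shape-level sketch of that forward pass, assuming zero-filled placeholder weights in place of the values loaded from onnx.fdata (all names here are illustrative, not the patch's):

```cpp
#include <math.h>
#include <stdio.h>
#include <string.h>

#define N_HW   6	/* imputed + scaled hardware parameters       */
#define N_CPU  12	/* one-hot categories (cats_strings rows)     */
#define N_IN   (N_HW + N_CPU)
#define N_HID  100	/* hidden units (coefficient is 18 x 100)     */

/* Placeholders standing in for coefficient, intercepts, coefficient1
   and intercepts1 as read from onnx.fdata.  */
static float weights1[N_IN][N_HID];
static float bias1[N_HID];
static float weights2[N_HID];
static float bias2;

/* Return 1 when the positive class wins, mirroring the case where the
   patch sets optimize_result to 1.  */
static int
infer (const float *hw_scaled, const float *cpu_one_hot)
{
  float in[N_IN];
  memcpy (in, hw_scaled, N_HW * sizeof (float));
  memcpy (in + N_HW, cpu_one_hot, N_CPU * sizeof (float));

  /* Dense 18 -> 100 with ReLU.  */
  float hidden[N_HID];
  for (int j = 0; j < N_HID; j++)
    {
      float acc = bias1[j];
      for (int i = 0; i < N_IN; i++)
	acc += in[i] * weights1[i][j];
      hidden[j] = acc > 0.0f ? acc : 0.0f;
    }

  /* Dense 100 -> 1 with sigmoid.  */
  float acc = bias2;
  for (int j = 0; j < N_HID; j++)
    acc += hidden[j] * weights2[j];
  float p = 1.0f / (1.0f + expf (-acc));

  /* argmax over the two class probabilities {1 - p, p}.  */
  return p > 1.0f - p;
}

int
main (void)
{
  float hw[N_HW] = { 0.0f };
  float cpu[N_CPU] = { 0.0f };
  printf ("decision: %d\n", infer (hw, cpu));
  return 0;
}
```

With the real weights this decision is what get_optimize_decision_from_ai4c caches: on a positive vote it exports AI_GUIDED=1, which in turn satisfies the relaxed gate the patch adds to pass_ipa_hardware_detection.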