GCC Middle and Back End API Reference
gcse.cc File Reference
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"
#include "function-abi.h"
#include "gt-gcse.h"
Include dependency graph for gcse.cc:

Data Structures

struct  gcse_expr
 
struct  gcse_occr
 
struct  gcse_hash_table_d
 
struct  ls_expr
 
struct  pre_ldst_expr_hasher
 
struct  bb_data
 
struct  reg_avail_info
 
struct  mem_conflict_info
 
struct  set_data
 

Macros

#define BB_DATA(bb)   ((struct bb_data *) (bb)->aux)
 
#define GNEW(T)   ((T *) gmalloc (sizeof (T)))
 
#define GCNEW(T)   ((T *) gcalloc (1, sizeof (T)))
 
#define GNEWVEC(T, N)   ((T *) gmalloc (sizeof (T) * (N)))
 
#define GCNEWVEC(T, N)   ((T *) gcalloc ((N), sizeof (T)))
 
#define GNEWVAR(T, S)   ((T *) gmalloc ((S)))
 
#define GCNEWVAR(T, S)   ((T *) gcalloc (1, (S)))
 
#define GOBNEW(T)   ((T *) gcse_alloc (sizeof (T)))
 
#define GOBNEWVAR(T, S)   ((T *) gcse_alloc ((S)))
 
#define can_copy    (this_target_gcse->x_can_copy)
 
#define can_copy_init_p    (this_target_gcse->x_can_copy_init_p)
 

Typedefs

typedef struct gcse_occr * occr_t
 

Functions

static bool expr_equiv_p (const_rtx, const_rtx)
 
static void compute_can_copy (void)
 
static void * gmalloc (size_t) ATTRIBUTE_MALLOC
 
static void * gcalloc (size_t, size_t) ATTRIBUTE_MALLOC
 
static void * gcse_alloc (unsigned long)
 
static void alloc_gcse_mem (void)
 
static void free_gcse_mem (void)
 
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *)
 
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *)
 
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *)
 
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *)
 
static bool oprs_unchanged_p (const_rtx, const rtx_insn *, bool)
 
static bool oprs_anticipatable_p (const_rtx, const rtx_insn *)
 
static bool oprs_available_p (const_rtx, const rtx_insn *)
 
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, bool, bool, HOST_WIDE_INT, struct gcse_hash_table_d *)
 
static unsigned int hash_expr (const_rtx, machine_mode, int *, int)
 
static void record_last_reg_set_info (rtx_insn *, int)
 
static void record_last_mem_set_info (rtx_insn *)
 
static void record_last_set_info (rtx, const_rtx, void *)
 
static void compute_hash_table (struct gcse_hash_table_d *)
 
static void alloc_hash_table (struct gcse_hash_table_d *)
 
static void free_hash_table (struct gcse_hash_table_d *)
 
static void compute_hash_table_work (struct gcse_hash_table_d *)
 
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *)
 
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *, struct gcse_hash_table_d *)
 
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *)
 
static bool load_killed_in_block_p (const_basic_block, int, const_rtx, bool)
 
static void alloc_pre_mem (int, int)
 
static void free_pre_mem (void)
 
static struct edge_list * compute_pre_data (void)
 
static bool pre_expr_reaches_here_p (basic_block, struct gcse_expr *, basic_block)
 
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block)
 
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *)
 
static void pre_insert_copies (void)
 
static bool pre_delete (void)
 
static bool pre_gcse (struct edge_list *)
 
static bool one_pre_gcse_pass (void)
 
static void add_label_notes (rtx, rtx_insn *)
 
static void alloc_code_hoist_mem (int, int)
 
static void free_code_hoist_mem (void)
 
static void compute_code_hoist_vbeinout (void)
 
static void compute_code_hoist_data (void)
 
static bool should_hoist_expr_to_dom (basic_block, struct gcse_expr *, basic_block, sbitmap, HOST_WIDE_INT, int *, enum reg_class, int *, bitmap, rtx_insn *)
 
static bool hoist_code (void)
 
static enum reg_class get_regno_pressure_class (int regno, int *nregs)
 
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
 
static bool one_code_hoisting_pass (void)
 
static rtx_insn * process_insert_insn (struct gcse_expr *)
 
static bool pre_edge_insert (struct edge_list *, struct gcse_expr **)
 
static bool pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *, basic_block, char *)
 
static struct ls_expr * ldst_entry (rtx)
 
static void free_ldst_entry (struct ls_expr *)
 
static void free_ld_motion_mems (void)
 
static void print_ldst_list (FILE *)
 
static struct ls_expr * find_rtx_in_ldst (rtx)
 
static bool simple_mem (const_rtx)
 
static void invalidate_any_buried_refs (rtx)
 
static void compute_ld_motion_mems (void)
 
static void trim_ld_motion_mems (void)
 
static void update_ld_motion_stores (struct gcse_expr *)
 
static void clear_modify_mem_tables (void)
 
static void free_modify_mem_tables (void)
 
bool can_copy_p (machine_mode mode)
 
static bool want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
 
bool can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
 
static void prune_expressions (bool pre_p)
 
static void prune_insertions_deletions (int n_elems)
 
rtx_insn * prepare_copy_insn (rtx reg, rtx exp)
 
rtx_insn * insert_insn_end_basic_block (rtx_insn *pat, basic_block bb)
 
static void record_set_data (rtx dest, const_rtx set, void *data)
 
static const_rtx single_set_gcse (rtx_insn *insn)
 
static rtx_insn * gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
 
static int update_bb_reg_pressure (basic_block bb, rtx_insn *from)
 
static struct gcse_occr * find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
 
static void change_pressure (int regno, bool incr_p)
 
static void calculate_bb_reg_pressure (void)
 
bool gcse_or_cprop_is_too_expensive (const char *pass)
 
static unsigned int execute_rtl_pre (void)
 
static unsigned int execute_rtl_hoist (void)
 
rtl_opt_pass * make_pass_rtl_pre (gcc::context *ctxt)
 
rtl_opt_pass * make_pass_rtl_hoist (gcc::context *ctxt)
 
void gcse_cc_finalize (void)
 

Variables

struct target_gcse default_target_gcse
 
int flag_rerun_cse_after_global_opts
 
static struct obstack gcse_obstack
 
static struct gcse_hash_table_d expr_hash_table
 
static struct ls_expr * pre_ldst_mems = NULL
 
static hash_table< pre_ldst_expr_hasher > * pre_ldst_table
 
static regset reg_set_bitmap
 
static vec< rtx_insn * > * modify_mem_list
 
static bitmap modify_mem_list_set
 
static vec< modify_pair > * canon_modify_mem_list
 
static bitmap blocks_with_calls
 
static int bytes_used
 
static int gcse_subst_count
 
static int gcse_create_count
 
static bool doing_code_hoisting_p = false
 
static sbitmap * ae_kill
 
static basic_block curr_bb
 
static int curr_reg_pressure [N_REG_CLASSES]
 
static struct reg_avail_info * reg_avail_info
 
static basic_block current_bb
 
static rtx_insn * test_insn
 
static sbitmap * transp
 
static sbitmap * comp
 
static sbitmap * antloc
 
static sbitmap * pre_optimal
 
static sbitmap * pre_redundant
 
static sbitmap * pre_insert_map
 
static sbitmap * pre_delete_map
 
static sbitmap * hoist_vbein
 
static sbitmap * hoist_vbeout
 

Macro Definition Documentation

◆ BB_DATA

◆ can_copy

#define can_copy    (this_target_gcse->x_can_copy)
Misc. utilities.   

Referenced by can_copy_p(), and compute_can_copy().

◆ can_copy_init_p

#define can_copy_init_p    (this_target_gcse->x_can_copy_init_p)

Referenced by can_copy_p().

◆ GCNEW

#define GCNEW ( T)    ((T *) gcalloc (1, sizeof (T)))

◆ GCNEWVAR

#define GCNEWVAR ( T,
S )   ((T *) gcalloc (1, (S)))

◆ GCNEWVEC

#define GCNEWVEC ( T,
N )   ((T *) gcalloc ((N), sizeof (T)))

◆ GNEW

#define GNEW ( T)    ((T *) gmalloc (sizeof (T)))

◆ GNEWVAR

#define GNEWVAR ( T,
S )   ((T *) gmalloc ((S)))

Referenced by alloc_hash_table().

◆ GNEWVEC

#define GNEWVEC ( T,
N )   ((T *) gmalloc (sizeof (T) * (N)))

Referenced by compute_hash_table_work().

◆ GOBNEW

#define GOBNEW ( T)    ((T *) gcse_alloc (sizeof (T)))

Referenced by insert_expr_in_table().

◆ GOBNEWVAR

#define GOBNEWVAR ( T,
S )   ((T *) gcse_alloc ((S)))

Typedef Documentation

◆ occr_t

Function Documentation

◆ add_label_notes()

static void add_label_notes ( rtx x,
rtx_insn * insn )
static
If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
to INSN.  If such notes are added to an insn which references a
CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
that note, because the following loop optimization pass requires
them.   
??? If there was a jump optimization pass after gcse and before loop,
then we would not need to do this here, because jump would add the
necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.   

References add_label_notes(), add_reg_note(), gcc_assert, GET_CODE, GET_RTX_FORMAT, GET_RTX_LENGTH, ggc_alloc(), i, JUMP_P, LABEL_NUSES, LABEL_P, label_ref_label(), LABEL_REF_NONLOCAL_P, XEXP, XVECEXP, and XVECLEN.

Referenced by add_label_notes(), and insert_insn_end_basic_block().

◆ alloc_code_hoist_mem()

static void alloc_code_hoist_mem ( int n_blocks,
int n_exprs )
static
??? We could compute post dominators and run this algorithm in
reverse to perform tail merging; doing so would probably be
more effective than the tail merging code in jump.cc.

It's unclear if tail merging could be run in parallel with
code hoisting.  It would be nice.   
Allocate vars used for code hoisting analysis.   

References antloc, comp, ggc_alloc(), hoist_vbein, hoist_vbeout, sbitmap_vector_alloc(), and transp.

Referenced by one_code_hoisting_pass().

◆ alloc_gcse_mem()

static void alloc_gcse_mem ( void )
static
Allocate memory for the reg/memory set tracking tables.
This is called at the start of each pass.   

References ALLOC_REG_SET, BITMAP_ALLOC, blocks_with_calls, canon_modify_mem_list, cfun, GCNEWVEC, last_basic_block_for_fn, modify_mem_list, modify_mem_list_set, NULL, and reg_set_bitmap.

Referenced by one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ alloc_hash_table()

static void alloc_hash_table ( struct gcse_hash_table_d * table)
static
Allocate space for the set/expr hash TABLE.
The instruction count of the function is used to determine the number of
buckets to use.   

References get_max_insn_count(), GNEWVAR, and table.

Referenced by one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ alloc_pre_mem()

static void alloc_pre_mem ( int n_blocks,
int n_exprs )
static
Allocate vars used for PRE analysis.   

References ae_kill, antloc, comp, ggc_alloc(), NULL, pre_delete_map, pre_insert_map, pre_optimal, pre_redundant, sbitmap_vector_alloc(), and transp.

Referenced by one_pre_gcse_pass().

◆ calculate_bb_reg_pressure()

◆ can_assign_to_reg_without_clobbers_p()

bool can_assign_to_reg_without_clobbers_p ( rtx x,
machine_mode mode )
Return true if we can assign X to a pseudo register of mode MODE
such that the resulting insn does not result in clobbering a hard
register as a side-effect.

Additionally, if the target requires it, check that the resulting insn
can be copied.  If it cannot, this means that X is special and probably
has hidden side-effects we don't want to mess with.

This function is typically used by code motion passes, to verify
that it is safe to insert an insn without worrying about clobbering
maybe live hard regs.   

References added_clobbers_hard_reg_p(), const0_rtx, gen_rtx_REG(), general_operand(), GET_MODE, ggc_alloc(), INSN_LOCATION(), make_insn_raw(), NULL_RTX, PATTERN(), PUT_MODE(), recog(), SET_DEST, SET_NEXT_INSN(), SET_PREV_INSN(), SET_SRC, targetm, test_insn, UNKNOWN_LOCATION, and word_mode.

Referenced by compute_ld_motion_mems(), find_moveable_store(), and want_to_gcse_p().

◆ can_copy_p()

bool can_copy_p ( machine_mode mode)
Returns whether the mode supports reg/reg copy operations.   

References can_copy, can_copy_init_p, and compute_can_copy().

Referenced by hash_scan_set(), and may_assign_reg_p().

◆ change_pressure()

static void change_pressure ( int regno,
bool incr_p )
static
Increase (if INCR_P) or decrease current register pressure for
register REGNO.   

References BB_DATA, curr_bb, curr_reg_pressure, get_regno_pressure_class(), and ggc_alloc().

Referenced by calculate_bb_reg_pressure().

◆ clear_modify_mem_tables()

static void clear_modify_mem_tables ( void )
static
Expression tracking support.   
Clear canon_modify_mem_list and modify_mem_list tables.   

References bitmap_clear(), blocks_with_calls, canon_modify_mem_list, EXECUTE_IF_SET_IN_BITMAP, i, modify_mem_list, and modify_mem_list_set.

Referenced by compute_hash_table_work(), and free_modify_mem_tables().

◆ compute_can_copy()

static void compute_can_copy ( void )
static
Compute which modes support reg/reg copy operations.   

References can_copy, emit_insn(), end_sequence(), gen_rtx_REG(), GET_MODE_CLASS, ggc_alloc(), i, LAST_VIRTUAL_REGISTER, NULL, PATTERN(), recog(), and start_sequence().

Referenced by can_copy_p().

◆ compute_code_hoist_data()

static void compute_code_hoist_data ( void )
static

◆ compute_code_hoist_vbeinout()

static void compute_code_hoist_vbeinout ( void )
static
Compute the very busy expressions at entry/exit from each block.

An expression is very busy if all paths from a given point
compute the expression.   

References antloc, bitmap_intersection_of_succs(), bitmap_ior(), bitmap_or_and(), bitmap_vector_clear(), cfun, changed, comp, dump_bitmap_file(), dump_file, EXIT_BLOCK_PTR_FOR_FN, FOR_EACH_BB_FN, FOR_EACH_BB_REVERSE_FN, ggc_alloc(), hoist_vbein, hoist_vbeout, basic_block_def::index, last_basic_block_for_fn, basic_block_def::next_bb, and transp.

Referenced by compute_code_hoist_data().
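
The iteration amounts to the classic backward very-busy-expressions dataflow: hoist_vbeout[bb] is the intersection of hoist_vbein over bb's successors, and hoist_vbein[bb] is antloc[bb] united with (hoist_vbeout[bb] intersected with transp[bb]). Below is a minimal, self-contained C++ sketch of that fixed-point computation; the block and expression representation is illustrative only and does not use GCC's sbitmap vectors.

  // Sketch of the backward "very busy expressions" dataflow, assuming the
  // standard equations above; not GCC's sbitmap machinery.
  #include <bitset>
  #include <vector>

  static const int N_EXPRS = 64;
  typedef std::bitset<N_EXPRS> expr_set;

  struct block
  {
    std::vector<int> succs;   // successor block indices
    expr_set antloc;          // locally anticipatable expressions
    expr_set transp;          // transparent expressions
    expr_set vbein, vbeout;   // dataflow results
  };

  static void
  compute_vbeinout (std::vector<block> &bbs)
  {
    bool changed = true;
    while (changed)
      {
        changed = false;
        // Walk blocks in reverse, as the real pass does, to converge faster.
        for (int i = (int) bbs.size () - 1; i >= 0; i--)
          {
            // vbeout[b] = intersection of vbein over all successors of b;
            // a block with no successors (the exit) gets the empty set.
            expr_set out;
            if (!bbs[i].succs.empty ())
              {
                out.set ();
                for (int s : bbs[i].succs)
                  out &= bbs[s].vbein;
              }
            // vbein[b] = antloc[b] | (vbeout[b] & transp[b])
            expr_set in = bbs[i].antloc | (out & bbs[i].transp);
            if (in != bbs[i].vbein || out != bbs[i].vbeout)
              {
                bbs[i].vbein = in;
                bbs[i].vbeout = out;
                changed = true;
              }
          }
      }
  }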

◆ compute_hash_table()

static void compute_hash_table ( struct gcse_hash_table_d * table)
static
Compute the expression hash table TABLE.   

References compute_hash_table_work(), ggc_alloc(), and table.

◆ compute_hash_table_work()

static void compute_hash_table_work ( struct gcse_hash_table_d * table)
static
Top level function to create an expression hash table.

Expression entries are placed in the hash table if
- they are of the form (set (pseudo-reg) src),
- src is something we want to perform GCSE on,
- none of the operands are subsequently modified in the block

Currently src must be a pseudo-reg or a const_int.

TABLE is the table computed.   

References CALL_P, can_throw_external(), cfun, clear_modify_mem_tables(), current_bb, EXECUTE_IF_SET_IN_HARD_REG_SET, FOR_BB_INSNS, FOR_EACH_BB_FN, free(), function_abi::full_and_partial_reg_clobbers(), ggc_alloc(), GNEWVEC, hash_scan_insn(), i, insn_callee_abi(), reg_avail_info::last_bb, max_reg_num(), NONDEBUG_INSN_P, note_stores(), NULL, record_last_mem_set_info(), record_last_reg_set_info(), record_last_set_info(), RTL_CONST_OR_PURE_CALL_P, RTL_LOOPING_CONST_OR_PURE_CALL_P, and table.

Referenced by compute_hash_table().

◆ compute_ld_motion_mems()

static void compute_ld_motion_mems ( void )
static
Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
means MEM loads and stores to symbols, with no side effects and no
registers in the expression.  For a MEM destination, we also check that
the insn is still valid if we replace the destination with a REG, as is
done in update_ld_motion_stores.  If there are any uses/defs which don't
match these criteria, they are invalidated and trimmed out later.   

References can_assign_to_reg_without_clobbers_p(), cfun, find_reg_equal_equiv_note(), FOR_BB_INSNS, FOR_EACH_BB_FN, GET_CODE, GET_MODE, ggc_alloc(), ls_expr::invalid, invalidate_any_buried_refs(), ldst_entry(), MEM_P, NONDEBUG_INSN_P, NULL, PATTERN(), pre_ldst_mems, pre_ldst_table, REG_NOTE_KIND, REG_P, SET, SET_DEST, SET_SRC, simple_mem(), ls_expr::stores, and XEXP.

Referenced by one_pre_gcse_pass().

◆ compute_local_properties()

static void compute_local_properties ( sbitmap * transp,
sbitmap * comp,
sbitmap * antloc,
struct gcse_hash_table_d * table )
static
Compute the local properties of each recorded expression.

Local properties are those that are defined by the block, irrespective of
other blocks.

An expression is transparent in a block if its operands are not modified
in the block.

An expression is computed (locally available) in a block if it is computed
at least once and the expression would contain the same value if the
computation was moved to the end of the block.

An expression is locally anticipatable in a block if it is computed at
least once and the expression would contain the same value if the
computation was moved to the beginning of the block.

We call this routine for pre and code hoisting.  Both compute
basically the same information and thus can easily share this code.

TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
properties.  If NULL, then it is not necessary to compute or record that
particular property.

TABLE controls which hash table to look at.   

References antloc, expr::avail_occr, expr::bitmap_index, bitmap_set_bit, bitmap_vector_clear(), bitmap_vector_ones(), BLOCK_FOR_INSN(), blocks_with_calls, canon_modify_mem_list, cfun, comp, compute_transp(), occr::deleted_p, expr::expr, expr, i, basic_block_def::index, occr::insn, last_basic_block_for_fn, modify_mem_list_set, occr::next, NULL, table, and transp.

Referenced by compute_code_hoist_data(), and compute_pre_data().
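
To make the three local properties concrete, here is a small self-contained C++ model for a single expression. Statements are reduced to a "computes the expression" flag plus the set of registers they modify; this representation is purely illustrative and is not the RTL walk the real pass performs.

  #include <set>
  #include <vector>

  struct stmt
  {
    bool computes_expr;        // statement computes the expression
    std::set<int> defs;        // registers the statement modifies
  };

  struct local_props { bool transp, comp, antloc; };

  static bool
  kills_operand (const stmt &s, const std::set<int> &operands)
  {
    for (int r : s.defs)
      if (operands.count (r))
        return true;
    return false;
  }

  static local_props
  compute_local_props_for_expr (const std::vector<stmt> &block,
                                const std::set<int> &operands)
  {
    local_props p = { true, false, false };
    bool killed_so_far = false;          // an operand was modified earlier
    for (const stmt &s : block)
      {
        if (s.computes_expr)
          {
            // Anticipatable: computed before any operand is modified.
            if (!killed_so_far)
              p.antloc = true;
            // Tentatively available; a later kill revokes this.
            p.comp = true;
          }
        if (kills_operand (s, operands))
          {
            p.transp = false;            // block is not transparent
            killed_so_far = true;
            p.comp = false;              // value not valid at block end
          }
      }
    return p;
  }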

◆ compute_pre_data()

◆ dump_hash_table()

static void dump_hash_table ( FILE * file,
const char * name,
struct gcse_hash_table_d * table )
static
Dump the hash table TABLE to file FILE under the name NAME.   

References expr::bitmap_index, expr::expr, expr, free(), ggc_alloc(), HOST_WIDE_INT_PRINT_DEC, i, NULL, print_rtl(), and table.

Referenced by one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ execute_rtl_hoist()

◆ execute_rtl_pre()

◆ expr_equiv_p()

static bool expr_equiv_p ( const_rtx x,
const_rtx y )
static
Return true if exp1 is equivalent to exp2.   

References exp_equiv_p(), and y.

Referenced by pre_ldst_expr_hasher::equal(), insert_expr_in_table(), mems_conflict_for_gcse_p(), pre_insert_copy_insn(), and trim_ld_motion_mems().

◆ find_occr_in_bb()

static struct gcse_occr * find_occr_in_bb ( struct gcse_occr * occr,
basic_block bb )
static
Find occurrence in BB.   

References BLOCK_FOR_INSN(), occr::insn, and occr::next.

Referenced by hoist_code().

◆ find_rtx_in_ldst()

static struct ls_expr * find_rtx_in_ldst ( rtx x)
static
Return the ldst list entry whose pattern matches X, or NULL if X is not in
the list of ldst only expressions.   

References ggc_alloc(), NULL, ls_expr::pattern, and pre_ldst_table.

Referenced by mems_conflict_for_gcse_p(), and update_ld_motion_stores().

◆ free_code_hoist_mem()

static void free_code_hoist_mem ( void )
static
Free vars used for code hoisting analysis.   

References antloc, CDI_DOMINATORS, comp, free_dominance_info(), hoist_vbein, hoist_vbeout, sbitmap_vector_free(), and transp.

Referenced by one_code_hoisting_pass().

◆ free_gcse_mem()

static void free_gcse_mem ( void )
static

◆ free_hash_table()

static void free_hash_table ( struct gcse_hash_table_d * table)
static
Free things allocated by alloc_hash_table.   

References free(), and table.

Referenced by one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ free_ld_motion_mems()

static void free_ld_motion_mems ( void )
static
Free up all memory associated with the ldst list.   

References free_ldst_entry(), ggc_alloc(), ls_expr::next, NULL, pre_ldst_mems, and pre_ldst_table.

Referenced by one_pre_gcse_pass().

◆ free_ldst_entry()

static void free_ldst_entry ( struct ls_expr * ptr)
static
Free up an individual ldst entry.   

References free(), and ls_expr::stores.

Referenced by free_ld_motion_mems(), and trim_ld_motion_mems().

◆ free_modify_mem_tables()

static void free_modify_mem_tables ( void )
static
Release memory used by modify_mem_list_set.   

References canon_modify_mem_list, clear_modify_mem_tables(), free(), and modify_mem_list.

Referenced by free_gcse_mem().

◆ free_pre_mem()

static void free_pre_mem ( void )
static
Free vars used for PRE analysis.   

References comp, NULL, pre_delete_map, pre_insert_map, pre_optimal, pre_redundant, sbitmap_vector_free(), and transp.

Referenced by one_pre_gcse_pass().

◆ gcalloc()

static void * gcalloc ( size_t nelem,
size_t elsize )
static
Cover function to xcalloc to record bytes allocated.   

References bytes_used, and ggc_alloc().

◆ gcse_alloc()

static void * gcse_alloc ( unsigned long size)
static
Cover function to obstack_alloc.   

References bytes_used, gcse_obstack, and ggc_alloc().

◆ gcse_cc_finalize()

void gcse_cc_finalize ( void )
Reset all state within gcse.cc so that we can rerun the compiler
within the same process.  For use by toplev::finalize.   

References NULL, and test_insn.

Referenced by toplev::finalize().

◆ gcse_emit_move_after()

static rtx_insn * gcse_emit_move_after ( rtx dest,
rtx src,
rtx_insn * insn )
static

◆ gcse_or_cprop_is_too_expensive()

bool gcse_or_cprop_is_too_expensive ( const char * pass)
Return true if the graph is too expensive to optimize. PASS is the
optimization about to be performed.   

References cfun, ggc_alloc(), max_reg_num(), n_basic_blocks_for_fn, n_edges_for_fn, SBITMAP_ELT_TYPE, SBITMAP_SET_SIZE, and warning().

Referenced by gcse_after_reload_main(), one_code_hoisting_pass(), one_cprop_pass(), and one_pre_gcse_pass().

◆ get_pressure_class_and_nregs()

static enum reg_class get_pressure_class_and_nregs ( rtx_insn * insn,
int * nregs )
static
Return pressure class and number of hard registers (through *NREGS)
for destination of INSN.  

References gcc_assert, GET_CODE, GET_MODE, ggc_alloc(), ira_pressure_class_translate, ira_reg_class_max_nregs, MEM_P, reg_allocno_class(), REG_P, REGNO, SET_DEST, SET_SRC, single_set_gcse(), and SUBREG_REG.

Referenced by hoist_code().

◆ get_regno_pressure_class()

static enum reg_class get_regno_pressure_class ( int regno,
int * nregs )
static
Return pressure class and number of needed hard registers (through
*NREGS) of register REGNO.   

References eliminable_regset, ggc_alloc(), ira_no_alloc_regs, ira_pressure_class_translate, ira_reg_class_max_nregs, PSEUDO_REGNO_MODE, reg_allocno_class(), and TEST_HARD_REG_BIT.

Referenced by change_pressure(), and update_bb_reg_pressure().

◆ gmalloc()

static void * gmalloc ( size_t size)
static
Cover function to xmalloc to record bytes allocated.   

References bytes_used, and ggc_alloc().

◆ hash_expr()

static unsigned int hash_expr ( const_rtx x,
machine_mode mode,
int * do_not_record_p,
int hash_table_size )
static
Hash expression X.

MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
indicating if a volatile operand is found or if the expression contains
something we don't want to insert in the table.  HASH_TABLE_SIZE is
the current size of the hash table to be probed.   

References ggc_alloc(), hash_rtx(), and NULL.

Referenced by insert_expr_in_table().

◆ hash_scan_call()

static void hash_scan_call ( rtx x,
rtx_insn * insn,
struct gcse_hash_table_d * table )
static

Referenced by hash_scan_insn(), and hash_scan_set().

◆ hash_scan_clobber()

static void hash_scan_clobber ( rtx x,
rtx_insn * insn,
struct gcse_hash_table_d * table )
static

Referenced by hash_scan_insn().

◆ hash_scan_insn()

static void hash_scan_insn ( rtx_insn * insn,
struct gcse_hash_table_d * table )
static
Process INSN and add hash table entries as appropriate.   

References GET_CODE, ggc_alloc(), hash_scan_call(), hash_scan_clobber(), hash_scan_set(), i, gcse_occr::insn, PATTERN(), SET, table, XVECEXP, and XVECLEN.

Referenced by compute_hash_table_work().

◆ hash_scan_set()

◆ hoist_code()

static bool hoist_code ( void )
static
Actually perform code hoisting.

The code hoisting pass can hoist multiple computations of the same
expression along dominated path to a dominating basic block, like
from b2/b3 to b1 as depicted below:

       b1      ------
       /\         |
      /  \        |
     bx   by   distance
    /      \      |
   /        \     |
  b2        b3 ------

Unfortunately code hoisting generally extends the live range of an
output pseudo register, which increases register pressure and hurts
register allocation.  To address this issue, an attribute MAX_DISTANCE
is computed and attached to each expression.  The attribute is computed
from rtx cost of the corresponding expression and it's used to control
how long the expression can be hoisted up in flow graph.  As the
expression is hoisted up in flow graph, GCC decreases its DISTANCE
and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
register pressure if live ranges of inputs are shrunk.

Option "-fira-hoist-pressure" implements register pressure directed
hoist based on upper method.  The rationale is:
  1. Calculate register pressure for each basic block by reusing IRA
     facility.
  2. When expression is hoisted through one basic block, GCC checks
     the change of live ranges for inputs/output.  The basic block's
     register pressure will be increased because of extended live
     range of output.  However, register pressure will be decreased
     if the live ranges of inputs are shrunk.
  3. After knowing how hoisting affects register pressure, GCC prefers
     to hoist the expression if it can decrease register pressure, by
     increasing DISTANCE of the corresponding expression.
  4. If hoisting the expression increases register pressure, GCC checks
     register pressure of the basic block and decrease DISTANCE only if
     the register pressure is high.  In other words, expression will be
     hoisted through at no cost if the basic block has low register
     pressure.
  5. Update register pressure information for basic blocks through
     which expression is hoisted.   

References antloc, BASIC_BLOCK_FOR_FN, BB_DATA, BITMAP_ALLOC, bitmap_bit_p, bitmap_clear(), bitmap_copy(), BITMAP_FREE, expr::bitmap_index, bitmap_set_bit, CDI_DOMINATORS, cfun, changed, comp, dbg_cnt(), delete_insn(), occr::deleted_p, EDGE_COUNT, EDGE_SUCC, ENTRY_BLOCK_PTR_FOR_FN, EXECUTE_IF_SET_IN_BITMAP, expr, expr_hash_table, find_occr_in_bb(), FOR_BB_INSNS, FOR_EACH_BB_FN, FOR_EACH_VEC_ELT, free(), gcc_assert, gcse_emit_move_after(), gcse_subst_count, gen_reg_rtx_and_attrs(), get_all_dominated_blocks(), get_dominated_to_depth(), get_max_uid(), get_pressure_class_and_nregs(), ggc_alloc(), hoist_vbeout, i, basic_block_def::index, insert_insn_end_basic_block(), occr::insn, INSN_UID(), last_basic_block_for_fn, gcse_expr::max_distance, gcse_hash_table_d::n_elems, nearest_common_dominator_for_set(), NONDEBUG_INSN_P, NULL, SBITMAP_SIZE, SET_DEST, should_hoist_expr_to_dom(), single_set_gcse(), gcse_hash_table_d::size, gcse_hash_table_d::table, and vNULL.

Referenced by one_code_hoisting_pass().
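
At the source level, the b1/b2/b3 picture above corresponds to something like the following hypothetical example (the pass itself works on RTL; the variable names are invented):

  /* Before hoisting: both dominated blocks compute a * b, so the
     expression is very busy at the end of the dominating block b1.  */
  int hoist_before (int a, int b, int c)
  {
    int x = 0, y = 0;
    if (c)
      x = a * b;        /* block b2 */
    else
      y = a * b;        /* block b3 */
    return x + y;
  }

  /* After hoisting: the expression is computed once in b1 and the
     dominated blocks use the new pseudo.  Its live range now spans from
     b1 down to b2/b3, which is what the MAX_DISTANCE and register
     pressure heuristics described above try to keep in check.  */
  int hoist_after (int a, int b, int c)
  {
    int x = 0, y = 0;
    int t = a * b;      /* hoisted into the dominating block b1 */
    if (c)
      x = t;
    else
      y = t;
    return x + y;
  }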

◆ insert_expr_in_table()

static void insert_expr_in_table ( rtx x,
machine_mode mode,
rtx_insn * insn,
bool antic_p,
bool avail_p,
HOST_WIDE_INT max_distance,
struct gcse_hash_table_d * table )
static
Insert expression X in INSN in the hash TABLE.
If it is already present, record it as the last occurrence in INSN's
basic block.

MODE is the mode of the value X is being stored into.
It is only used if X is a CONST_INT.

ANTIC_P is true if X is an anticipatable expression.
AVAIL_P is true if X is an available expression.

MAX_DISTANCE is the maximum distance in instructions this expression can
be moved.   

References gcse_expr::antic_occr, BLOCK_FOR_INSN(), bytes_used, gcse_occr::deleted_p, expr_equiv_p(), gcc_assert, ggc_alloc(), GOBNEW, hash_expr(), gcse_occr::insn, gcse_expr::max_distance, gcse_occr::next, NULL, and table.

Referenced by hash_scan_set().
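
A simplified model of the bookkeeping this function performs is sketched below. Expressions are plain strings and the fields do not mirror the real gcse_expr/gcse_occr layout, so treat it purely as an illustration of keeping separate anticipatable and available occurrence lists per expression.

  #include <string>
  #include <unordered_map>
  #include <vector>

  struct occurrence
  {
    int insn_uid;    // insn where the expression occurs
    int block;       // its basic block
  };

  struct expression_entry
  {
    long max_distance;                    // hoisting distance limit
    std::vector<occurrence> antic_occr;   // anticipatable occurrences
    std::vector<occurrence> avail_occr;   // available occurrences
  };

  static void
  insert_expr (std::unordered_map<std::string, expression_entry> &table,
               const std::string &expr, int insn_uid, int block,
               bool antic_p, bool avail_p, long max_distance)
  {
    expression_entry &e = table[expr];    // find or create the entry
    e.max_distance = max_distance;
    if (antic_p)
      e.antic_occr.push_back (occurrence {insn_uid, block});
    if (avail_p)
      {
        // For availability only the last occurrence in a block matters,
        // so an earlier entry for the same block is simply updated.
        if (!e.avail_occr.empty () && e.avail_occr.back ().block == block)
          e.avail_occr.back ().insn_uid = insn_uid;
        else
          e.avail_occr.push_back (occurrence {insn_uid, block});
      }
  }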

◆ insert_insn_end_basic_block() [1/2]

rtx_insn * insert_insn_end_basic_block ( rtx_insn * pat,
basic_block bb )

◆ insert_insn_end_basic_block() [2/2]

static void insert_insn_end_basic_block ( struct gcse_expr * expr,
basic_block bb )
static
Add EXPR to the end of basic block BB.

This is used by both the PRE and code hoisting.   

References expr::bitmap_index, dump_file, gcse_create_count, ggc_alloc(), basic_block_def::index, insert_insn_end_basic_block(), INSN_UID(), process_insert_insn(), and REGNO.

Referenced by hoist_code(), insert_insn_end_basic_block(), and pre_edge_insert().

◆ invalidate_any_buried_refs()

static void invalidate_any_buried_refs ( rtx x)
static
Make sure there isn't a buried reference in this pattern anywhere.
If there is, invalidate the entry for it since we're not capable
of fixing it up just yet.  We have to be sure we know about ALL
loads since the aliasing code will allow all entries in the
ld_motion list to not-alias itself.  If we miss a load, we will get
the wrong value since gcse might common it and we won't know to
fix it up.   

References GET_CODE, GET_RTX_FORMAT, GET_RTX_LENGTH, ggc_alloc(), i, ls_expr::invalid, invalidate_any_buried_refs(), ldst_entry(), MEM_P, simple_mem(), XEXP, XVECEXP, and XVECLEN.

Referenced by compute_ld_motion_mems(), and invalidate_any_buried_refs().

◆ ldst_entry()

static struct ls_expr * ldst_entry ( rtx x)
static
Here we provide the things required to do store motion towards the exit.
In order for this to be effective, gcse also needed to be taught how to
move a load when it is killed only by a store to itself.

        int i;
        float a[10];

        void foo(float scale)
        {
          for (i=0; i<10; i++)
        a[i] *= scale;
        }

'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
the load out since it is live around the loop and stored at the bottom
of the loop.

  The 'Load Motion' referred to and implemented in this file is
an enhancement to gcse which when using edge based LCM, recognizes
this situation and allows gcse to move the load out of the loop.

  Once gcse has hoisted the load, store motion can then push this
load towards the exit, and we end up with no loads or stores of 'i'
in the loop.   
This will search the ldst list for a matching expression. If it
doesn't find one, we create one and initialize it.   

References ls_expr::expr, GET_MODE, ggc_alloc(), ls_expr::hash_index, hash_rtx(), ls_expr::index, ls_expr::invalid, ls_expr::next, NULL, NULL_RTX, ls_expr::pattern, ls_expr::pattern_regs, pre_ldst_mems, pre_ldst_table, ls_expr::reaching_reg, and ls_expr::stores.

Referenced by compute_ld_motion_mems(), and invalidate_any_buried_refs().
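
The end result for the example above can be pictured at the source level roughly as follows. This is a hypothetical sketch of the effect, not compiler output, and 'i_reg' stands in for the pseudo that holds 'i' across the loop.

  int i;
  float a[10];

  void foo (float scale)
  {
    int i_reg;                  /* pseudo holding 'i' inside the loop */
    for (i_reg = 0; i_reg < 10; i_reg++)
      a[i_reg] *= scale;
    i = i_reg;                  /* single store, pushed towards the exit */
  }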

◆ load_killed_in_block_p()

static bool load_killed_in_block_p ( const_basic_block bb,
int uid_limit,
const_rtx x,
bool avail_p )
static
Return true if the expression in X (a memory reference) is killed
in block BB before or after the insn with the LUID in UID_LIMIT.
AVAIL_P is true for kills after UID_LIMIT, and false for kills
before UID_LIMIT.

To check the entire block, set UID_LIMIT to max_uid + 1 and
AVAIL_P to false.   

References CALL_P, DF_INSN_LUID, FOR_EACH_VEC_ELT_REVERSE, ggc_alloc(), basic_block_def::index, MEM_READONLY_P, mems_conflict_for_gcse_p(), modify_mem_list, and note_stores().

Referenced by oprs_unchanged_p().

◆ make_pass_rtl_hoist()

rtl_opt_pass * make_pass_rtl_hoist ( gcc::context * ctxt)

References ggc_alloc().

◆ make_pass_rtl_pre()

rtl_opt_pass * make_pass_rtl_pre ( gcc::context * ctxt)

References ggc_alloc().

◆ mems_conflict_for_gcse_p()

static void mems_conflict_for_gcse_p ( rtx dest,
const_rtx setter,
void * data )
static
DEST is the output of an instruction.  If it is a memory reference and
possibly conflicts with the load found in DATA, then communicate this
information back through DATA.   

References expr_equiv_p(), find_rtx_in_ldst(), GET_CODE, GET_MODE, ggc_alloc(), MEM_P, NULL, pre_ldst_mems, true_dependence(), and XEXP.

Referenced by load_killed_in_block_p().

◆ one_code_hoisting_pass()

◆ one_pre_gcse_pass()

◆ oprs_anticipatable_p()

static bool oprs_anticipatable_p ( const_rtx x,
const rtx_insn * insn )
static
Return true if the operands of expression X are unchanged from
the start of INSN's basic block up to but not including INSN.   

References oprs_unchanged_p().

Referenced by hash_scan_set().

◆ oprs_available_p()

static bool oprs_available_p ( const_rtx x,
const rtx_insn * insn )
static
Return true if the operands of expression X are unchanged from
INSN to the end of INSN's basic block.   

References oprs_unchanged_p().

Referenced by hash_scan_set().

◆ oprs_unchanged_p()

static bool oprs_unchanged_p ( const_rtx x,
const rtx_insn * insn,
bool avail_p )
static
Return true if the operands of expression X are unchanged from the
start of INSN's basic block up to but not including INSN
(if AVAIL_P == false), or from INSN to the end of INSN's basic block
(if AVAIL_P == true).   

References CASE_CONST_ANY, current_bb, DF_INSN_LUID, reg_avail_info::first_set, GET_CODE, GET_RTX_FORMAT, GET_RTX_LENGTH, ggc_alloc(), i, reg_avail_info::last_bb, reg_avail_info::last_set, load_killed_in_block_p(), oprs_unchanged_p(), REGNO, XEXP, XVECEXP, and XVECLEN.

Referenced by oprs_anticipatable_p(), oprs_available_p(), and oprs_unchanged_p().

◆ pre_delete()

static bool pre_delete ( void )
static
Delete redundant computations.
Deletion is done by changing the insn to copy the `reaching_reg' of
the expression into the result of the SET.  It is left to later passes
to propagate the copy or eliminate it.

Return true if a change is made.   

References bitmap_bit_p, expr::bitmap_index, BLOCK_FOR_INSN(), changed, dbg_cnt(), delete_insn(), occr::deleted_p, dump_file, expr, expr_hash_table, gcse_emit_move_after(), gcse_subst_count, gen_reg_rtx_and_attrs(), ggc_alloc(), i, basic_block_def::index, gcse_occr::insn, occr::insn, INSN_UID(), occr::next, NULL, pre_delete_map, REGNO, SET_DEST, single_set(), gcse_hash_table_d::size, and gcse_hash_table_d::table.

Referenced by pre_gcse().

◆ pre_edge_insert()

◆ pre_expr_reaches_here_p()

static bool pre_expr_reaches_here_p ( basic_block occr_bb,
struct gcse_expr * expr,
basic_block bb )
static
The wrapper for pre_expr_reaches_here_work that ensures that any
memory allocated for that function is returned.   

References cfun, free(), ggc_alloc(), last_basic_block_for_fn, pre_expr_reaches_here_p_work(), and visited.

Referenced by pre_insert_copies().

◆ pre_expr_reaches_here_p_work()

static bool pre_expr_reaches_here_p_work ( basic_block occr_bb,
struct gcse_expr * expr,
basic_block bb,
char * visited )
static
PRE utilities  
Return true if an occurrence of expression EXPR in OCCR_BB would reach
block BB.

VISITED is a pointer to a working buffer for tracking which BB's have
been visited.  It is NULL for the top-level call.

We treat reaching expressions that go through blocks containing the same
reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2 as not reaching.  The intent is to improve the probability of finding
only one reaching expression and to reduce register lifetimes by picking
the closest such expression.   

References bitmap_bit_p, expr::bitmap_index, cfun, comp, ENTRY_BLOCK_PTR_FOR_FN, FOR_EACH_EDGE, ggc_alloc(), pre_expr_reaches_here_p_work(), basic_block_def::preds, transp, and visited.

Referenced by pre_expr_reaches_here_p(), and pre_expr_reaches_here_p_work().

◆ pre_gcse()

static bool pre_gcse ( struct edge_list * edge_list)
static
Perform GCSE optimizations using PRE.
This is called by one_pre_gcse_pass after all the dataflow analysis
has been done.

This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
Compiler Design and Implementation.

??? A new pseudo reg is created to hold the reaching expression.  The nice
thing about the classical approach is that it would try to use an existing
reg.  If the register can't be adequately optimized [i.e. we introduce
reload problems], one could add a pass here to propagate the new register
through the block.

??? We don't handle single sets in PARALLELs because we're [currently] not
able to copy the rest of the parallel when we insert copies to create full
redundancies from partial redundancies.  However, there's no reason why we
can't handle PARALLELs in the cases where there are no partial
redundancies.   

References expr::bitmap_index, changed, commit_edge_insertions(), expr, expr_hash_table, free(), ggc_alloc(), i, gcse_hash_table_d::n_elems, pre_delete(), pre_edge_insert(), pre_insert_copies(), gcse_hash_table_d::size, and gcse_hash_table_d::table.

Referenced by one_pre_gcse_pass().

◆ pre_insert_copies()

◆ pre_insert_copy_insn()

static void pre_insert_copy_insn ( struct gcse_expr * expr,
rtx_insn * insn )
static
Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
Given "old_reg <- expr" (INSN), instead of adding after it
  reaching_reg <- old_reg
it's better to do the following:
  reaching_reg <- expr
  old_reg      <- reaching_reg
because this way copy propagation can discover additional PRE
opportunities.  But if this fails, we try the old way.
When "expr" is a store, i.e.
given "MEM <- old_reg", instead of adding after it
  reaching_reg <- old_reg
it's better to add it before as follows:
  reaching_reg <- old_reg
  MEM          <- reaching_reg.   

References expr::bitmap_index, BLOCK_FOR_INSN(), dump_file, emit_insn_after(), emit_insn_before(), expr::expr, expr_equiv_p(), gcc_assert, gcc_unreachable, gcse_create_count, gen_move_insn(), GET_CODE, ggc_alloc(), i, gcse_occr::insn, INSN_UID(), NULL_RTX, PATTERN(), REG_P, REGNO, SET, SET_DEST, SET_SRC, validate_change(), XVECEXP, and XVECLEN.

Referenced by pre_insert_copies().

◆ prepare_copy_insn()

rtx_insn * prepare_copy_insn ( rtx reg,
rtx exp )

◆ print_ldst_list()

static void print_ldst_list ( FILE * file)
static

◆ process_insert_insn()

static rtx_insn * process_insert_insn ( struct gcse_expr * expr)
static
Generate RTL to copy an EXPR to its `reaching_reg' and return it.   

References copy_rtx(), exp(), expr::expr, and prepare_copy_insn().

Referenced by insert_insn_end_basic_block(), and pre_edge_insert().

◆ prune_expressions()

static void prune_expressions ( bool pre_p)
static
Remove certain expressions from the anticipatable and transparent
sets of basic blocks that have an incoming abnormal edge.
For PRE remove potentially trapping expressions to avoid placing
them on abnormal edges.  For hoisting remove memory references that
can be clobbered by calls.   

References antloc, BB_END, bitmap_and_compl(), bitmap_clear(), expr::bitmap_index, bitmap_set_bit, CALL_P, cfun, CONSTANT_POOL_ADDRESS_P, contains_mem_rtx_p(), expr::expr, expr, expr_hash_table, FOR_EACH_BB_FN, FOR_EACH_EDGE, GET_CODE, ggc_alloc(), basic_block_def::index, may_trap_p(), MEM_NOTRAP_P, MEM_P, MEM_READONLY_P, MEM_VOLATILE_P, gcse_hash_table_d::n_elems, basic_block_def::preds, gcse_hash_table_d::size, gcse_hash_table_d::table, transp, ui, and XEXP.

Referenced by compute_code_hoist_data(), and compute_pre_data().

◆ prune_insertions_deletions()

static void prune_insertions_deletions ( int n_elems)
static
It may be necessary to insert a large number of insns on edges to
make the existing occurrences of expressions fully redundant.  This
routine examines the set of insertions and deletions and if the ratio
of insertions to deletions is too high for a particular expression, then
the expression is removed from the insertion/deletion sets. 

N_ELEMS is the number of elements in the hash table.   

References bitmap_clear(), bitmap_clear_bit(), bitmap_set_bit, cfun, EXECUTE_IF_SET_IN_BITMAP, free(), GCNEWVEC, ggc_alloc(), i, insertions, last_basic_block_for_fn, n_edges_for_fn, pre_delete_map, and pre_insert_map.

Referenced by compute_pre_data().

◆ record_last_mem_set_info()

static void record_last_mem_set_info ( rtx_insn * insn)
static
Record memory modification information for INSN.  We do not actually care
about the memory location(s) that are set, or even how they are set (consider
a CALL_INSN).  We merely need to record which insns modify memory.   

References blocks_with_calls, canon_modify_mem_list, ggc_alloc(), modify_mem_list, modify_mem_list_set, and record_last_mem_set_info_common().

Referenced by compute_hash_table_work(), and record_last_set_info().

◆ record_last_reg_set_info()

static void record_last_reg_set_info ( rtx_insn * insn,
int regno )
static
Record register first/last/block set information for REGNO in INSN.

first_set records the first place in the block where the register
is set and is used to compute "anticipatability".

last_set records the last place in the block where the register
is set and is used to compute "availability".

last_bb records the block for which first_set and last_set are
valid, as a quick test to invalidate them.   

References current_bb, DF_INSN_LUID, reg_avail_info::first_set, reg_avail_info::last_bb, and reg_avail_info::last_set.

Referenced by compute_hash_table_work(), and record_last_set_info().

◆ record_last_set_info()

static void record_last_set_info ( rtx dest,
const_rtx setter,
void * data )
static
Called from compute_hash_table via note_stores to handle one
SET or CLOBBER in an insn.  DATA is really the instruction in which
the SET is taking place.   

References GET_CODE, GET_MODE, ggc_alloc(), MEM_P, push_operand(), record_last_mem_set_info(), record_last_reg_set_info(), REG_P, REGNO, and SUBREG_REG.

Referenced by compute_hash_table_work().

◆ record_set_data()

static void record_set_data ( rtx dest,
const_rtx set,
void * data )
static
Increment number of sets and record set in DATA.   

References find_reg_note(), GET_CODE, ggc_alloc(), set_data::insn, set_data::nsets, set_data::set, SET, SET_DEST, and side_effects_p().

Referenced by single_set_gcse().

◆ should_hoist_expr_to_dom()

static bool should_hoist_expr_to_dom ( basic_block expr_bb,
struct gcse_expr * expr,
basic_block bb,
sbitmap visited,
HOST_WIDE_INT distance,
int * bb_size,
enum reg_class pressure_class,
int * nregs,
bitmap hoisted_bbs,
rtx_insn * from )
static
Determine if the expression EXPR should be hoisted to EXPR_BB up in
flow graph, if it can reach BB unimpaired.  Stop the search if the
expression would need to be moved more than DISTANCE instructions.

DISTANCE is the number of instructions through which EXPR can be
hoisted up in flow graph.

BB_SIZE points to an array which contains the number of instructions
for each basic block.

PRESSURE_CLASS and NREGS are register class and number of hard registers
for storing EXPR.

HOISTED_BBS points to a bitmap indicating basic blocks through which
EXPR is hoisted.

FROM is the instruction from which EXPR is hoisted.

It's unclear exactly what Muchnick meant by "unimpaired".  It seems
to me that the expression must either be computed or transparent in
*every* block in the path(s) from EXPR_BB to BB.  Any other definition
would allow the expression to be hoisted out of loops, even if
the expression wasn't a loop invariant.

Contrast this to reachability for PRE where an expression is
considered reachable if *any* path reaches instead of *all*
paths.   

References BB_DATA, bitmap_bit_p, bitmap_clear(), bitmap_copy(), expr::bitmap_index, bitmap_set_bit, cfun, CONST_INT_P, ENTRY_BLOCK_PTR_FOR_FN, EXECUTE_IF_SET_IN_BITMAP, expr::expr, FOR_EACH_EDGE, gcc_assert, ggc_alloc(), i, basic_block_def::index, ira_class_hard_regs_num, last_basic_block_for_fn, bb_data::max_reg_pressure, NULL, basic_block_def::preds, sbitmap_alloc(), sbitmap_free(), should_hoist_expr_to_dom(), transp, update_bb_reg_pressure(), and visited.

Referenced by hoist_code(), and should_hoist_expr_to_dom().

◆ simple_mem()

static bool simple_mem ( const_rtx x)
static
Load Motion for loads which only kill themselves.   
Return true if x, a MEM, is a simple access with no side effects.
These are the types of loads we consider for the ld_motion list,
otherwise we let the usual aliasing take care of it.   

References cfun, FLOAT_MODE_P, GET_MODE, ggc_alloc(), may_trap_p(), MEM_VOLATILE_P, reg_mentioned_p(), side_effects_p(), and stack_pointer_rtx.

Referenced by compute_ld_motion_mems(), and invalidate_any_buried_refs().

◆ single_set_gcse()

◆ trim_ld_motion_mems()

static void trim_ld_motion_mems ( void )
static

◆ update_bb_reg_pressure()

static int update_bb_reg_pressure ( basic_block bb,
rtx_insn * from )
static
Update register pressure for BB when hoisting an expression from
instruction FROM, if live ranges of inputs are shrunk.  Also
maintain live_in information if live range of register referred
in FROM is shrunk.

Return 0 if register pressure doesn't change, otherwise return
the number by which register pressure is decreased.

NOTE: Register pressure won't be increased in this function.   

References BB_DATA, bitmap_bit_p, bitmap_clear_bit(), BLOCK_FOR_INSN(), cfun, DF_REF_INSN, DF_REF_INSN_INFO, DF_REF_NEXT_REG, DF_REF_REAL_REG, DF_REG_USE_CHAIN, EXIT_BLOCK_PTR_FOR_FN, FOR_EACH_EDGE, FOR_EACH_INSN_USE, get_regno_pressure_class(), ggc_alloc(), NONDEBUG_INSN_P, NULL, REGNO, and basic_block_def::succs.

Referenced by should_hoist_expr_to_dom().

◆ update_ld_motion_stores()

static void update_ld_motion_stores ( struct gcse_expr * expr)
static
This routine will take an expression which we are replacing with
a reaching register, and update any stores that are needed if
that expression is in the ld_motion list.  Stores are updated by
copying their SRC to the reaching register, and then storing
the reaching register into the store location.  This keeps the
correct value in the reaching register for the loads.   

References copy_rtx(), df_insn_rescan(), dump_file, emit_insn_before(), expr::expr, find_rtx_in_ldst(), FOR_EACH_VEC_ELT_REVERSE, gcse_create_count, gen_move_insn(), ggc_alloc(), i, INSN_CODE, PATTERN(), print_inline_rtx(), print_rtl(), and SET_SRC.

Referenced by pre_edge_insert(), and pre_insert_copies().

◆ want_to_gcse_p()

static bool want_to_gcse_p ( rtx x,
machine_mode mode,
HOST_WIDE_INT * max_distance_ptr )
static

Variable Documentation

◆ ae_kill

sbitmap* ae_kill
static
For available exprs  

Referenced by alloc_pre_mem(), and compute_pre_data().

◆ antloc

◆ blocks_with_calls

bitmap blocks_with_calls
static
Bitmap indexed by block numbers to record which blocks contain
function calls.   

Referenced by alloc_gcse_mem(), clear_modify_mem_tables(), compute_local_properties(), compute_transp(), free_gcse_mem(), record_last_mem_set_info(), and record_last_mem_set_info_common().

◆ bytes_used

int bytes_used
static
Various variables for statistics gathering.   
Memory used in a pass.
This isn't intended to be absolutely precise.  Its intent is only
to keep an eye on memory usage.   

Referenced by gcalloc(), gcse_alloc(), gmalloc(), insert_expr_in_table(), one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ canon_modify_mem_list

vec<modify_pair>* canon_modify_mem_list
static
This array parallels modify_mem_list, except that it stores MEMs
being set and their canonicalized memory addresses.   

Referenced by alloc_gcse_mem(), clear_modify_mem_tables(), compute_local_properties(), compute_transp(), free_modify_mem_tables(), record_last_mem_set_info(), and record_last_mem_set_info_common().

◆ comp

◆ curr_bb

◆ curr_reg_pressure

int curr_reg_pressure[N_REG_CLASSES]
static
Current register pressure for each pressure class.   

Referenced by calculate_bb_reg_pressure(), and change_pressure().

◆ current_bb

◆ default_target_gcse

struct target_gcse default_target_gcse
Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.   
TODO
  - reordering of memory allocation and freeing to be more space efficient
  - calc rough register pressure information and use the info to drive all
    kinds of code motion (including code hoisting) in a unified way.
References searched while implementing this.

  Compilers Principles, Techniques and Tools
  Aho, Sethi, Ullman
  Addison-Wesley, 1988

  Global Optimization by Suppression of Partial Redundancies
  E. Morel, C. Renvoise
  communications of the acm, Vol. 22, Num. 2, Feb. 1979

  A Portable Machine-Independent Global Optimizer - Design and Measurements
  Frederick Chow
  Stanford Ph.D. thesis, Dec. 1983

  A Fast Algorithm for Code Movement Optimization
  D.M. Dhamdhere
  SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

  A Solution to a Problem with Morel and Renvoise's
  Global Optimization by Suppression of Partial Redundancies
  K-H Drechsler, M.P. Stadel
  ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

  Practical Adaptation of the Global Optimization
  Algorithm of Morel and Renvoise
  D.M. Dhamdhere
  ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

  Efficiently Computing Static Single Assignment Form and the Control
  Dependence Graph
  R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
  ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

  Lazy Code Motion
  J. Knoop, O. Ruthing, B. Steffen
  ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

  What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
  Time for Reducible Flow Control
  Thomas Ball
  ACM Letters on Programming Languages and Systems,
  Vol. 2, Num. 1-4, Mar-Dec 1993

  An Efficient Representation for Sparse Sets
  Preston Briggs, Linda Torczon
  ACM Letters on Programming Languages and Systems,
  Vol. 2, Num. 1-4, Mar-Dec 1993

  A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
  K-H Drechsler, M.P. Stadel
  ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

  Partial Dead Code Elimination
  J. Knoop, O. Ruthing, B. Steffen
  ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

  Effective Partial Redundancy Elimination
  P. Briggs, K.D. Cooper
  ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

  The Program Structure Tree: Computing Control Regions in Linear Time
  R. Johnson, D. Pearson, K. Pingali
  ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

  Optimal Code Motion: Theory and Practice
  J. Knoop, O. Ruthing, B. Steffen
  ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

  The power of assignment motion
  J. Knoop, O. Ruthing, B. Steffen
  ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

  Global code motion / global value numbering
  C. Click
  ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

  Value Driven Redundancy Elimination
  L.T. Simpson
  Rice University Ph.D. thesis, Apr. 1996

  Value Numbering
  L.T. Simpson
  Massively Scalar Compiler Project, Rice University, Sep. 1996

  High Performance Compilers for Parallel Computing
  Michael Wolfe
  Addison-Wesley, 1996

  Advanced Compiler Design and Implementation
  Steven Muchnick
  Morgan Kaufmann, 1997

  Building an Optimizing Compiler
  Robert Morgan
  Digital Press, 1998

  People wishing to speed up the code here should read:
    Elimination Algorithms for Data Flow Analysis
    B.G. Ryder, M.C. Paull
    ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

    How to Analyze Large Programs Efficiently and Informatively
    D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
    ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

  People wishing to do something different can find various possibilities
  in the above papers and elsewhere.
We support GCSE via Partial Redundancy Elimination.  PRE optimizations
are a superset of those done by classic GCSE.

Two passes of copy/constant propagation are done around PRE or hoisting
because the first one enables more GCSE and the second one helps to clean
up the copies that PRE and HOIST create.  This is needed more for PRE than
for HOIST because code hoisting will try to use an existing register
containing the common subexpression rather than create a new one.  This is
harder to do for PRE because of the code motion (which HOIST doesn't do).

Expressions we are interested in GCSE-ing are of the form
(set (pseudo-reg) (expression)).
Function want_to_gcse_p says what these are.

In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
This allows PRE to hoist expressions that are expressed in multiple insns,
such as complex address calculations (e.g. for PIC code, or loads with a
high part and a low part).

PRE handles moving invariant expressions out of loops (by treating them as
partially redundant).

**********************

We used to support multiple passes but there are diminishing returns in
doing so.  The first pass usually makes 90% of the changes that are doable.
A second pass can make a few more changes made possible by the first pass.
Experiments show any further passes don't make enough changes to justify
the expense.

A study of spec92 using an unlimited number of passes:
[1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
[6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
[12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

It was found doing copy propagation between each pass enables further
substitutions.

This study was done before expressions in REG_EQUAL notes were added as
candidate expressions for optimization, and before the GIMPLE optimizers
were added.  Probably, multiple passes is even less efficient now than
at the time when the study was conducted.

PRE is quite expensive in complicated functions because the DFA can take
a while to converge.  Hence we only perform one pass.

**********************

The steps for PRE are:

1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

2) Perform the data flow analysis for PRE.

3) Delete the redundant instructions

4) Insert the required copies [if any] that make the partially
   redundant instructions fully redundant.

5) For other reaching expressions, insert an instruction to copy the value
   to a newly created pseudo that will reach the redundant instruction.

The deletion is done first so that when we do insertions we
know which pseudo reg to use.

Various papers have argued that PRE DFA is expensive (O(n^2)) and others
argue it is not.  The number of iterations for the algorithm to converge
is typically 2-4 so I don't view it as that expensive (relatively speaking).

PRE GCSE depends heavily on the second CPROP pass to clean up the copies
we create.  To make an expression reach the place where it's redundant,
the result of the expression is copied to a new register, and the redundant
expression is deleted by replacing it with this new register.  Classic GCSE
doesn't have this problem as much as it computes the reaching defs of
each register in each block and thus can try to use an existing
register.   
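
As a hypothetical source-level picture of the delete/insert/copy steps above (the pass itself works on RTL, and the names below are invented):

  /* 'a + b' is available on the then-path but not on the else-path, so
     it is only partially redundant at the final computation.  */
  int pre_before (int a, int b, int c)
  {
    int x = 0;
    if (c)
      x = a + b;            /* one occurrence */
    return x + (a + b);     /* partially redundant occurrence */
  }

  /* PRE inserts a computation on the edge where it was missing, so the
     last occurrence becomes fully redundant, then replaces it with a
     copy of the new pseudo (shown here as 't').  The following CPROP
     pass cleans up the copies.  */
  int pre_after (int a, int b, int c)
  {
    int x = 0, t;
    if (c)
      {
        t = a + b;          /* result copied into the new pseudo */
        x = t;
      }
    else
      t = a + b;            /* insertion on the other edge */
    return x + t;           /* redundant computation replaced by a copy */
  }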
GCSE global vars.   

◆ doing_code_hoisting_p

bool doing_code_hoisting_p = false
static
Doing code hoisting.   

Referenced by one_code_hoisting_pass(), and want_to_gcse_p().

◆ expr_hash_table

◆ flag_rerun_cse_after_global_opts

int flag_rerun_cse_after_global_opts
Set to non-zero if CSE should run after all GCSE optimizations are done.   

Referenced by execute_rtl_cprop(), execute_rtl_hoist(), execute_rtl_pre(), execute_rtl_store_motion(), and rest_of_clean_state().

◆ gcse_create_count

int gcse_create_count
static

◆ gcse_obstack

struct obstack gcse_obstack
static
An obstack for our working variables.   

Referenced by gcse_alloc(), one_code_hoisting_pass(), and one_pre_gcse_pass().

◆ gcse_subst_count

int gcse_subst_count
static
GCSE substitutions made.   

Referenced by hoist_code(), one_code_hoisting_pass(), one_pre_gcse_pass(), and pre_delete().

◆ hoist_vbein

sbitmap* hoist_vbein
static
Code Hoisting variables and subroutines.   
Very busy expressions.   

Referenced by alloc_code_hoist_mem(), compute_code_hoist_vbeinout(), and free_code_hoist_mem().

◆ hoist_vbeout

◆ modify_mem_list

vec<rtx_insn *>* modify_mem_list
static
Array, indexed by basic block number for a list of insns which modify
memory within that block.   

Referenced by alloc_gcse_mem(), clear_modify_mem_tables(), free_modify_mem_tables(), load_killed_in_block_p(), record_last_mem_set_info(), and record_last_mem_set_info_common().

◆ modify_mem_list_set

◆ pre_delete_map

sbitmap* pre_delete_map
static
Nonzero for expressions which should be deleted in a specific block.   

Referenced by alloc_pre_mem(), compute_pre_data(), free_pre_mem(), pre_delete(), and prune_insertions_deletions().

◆ pre_insert_map

sbitmap* pre_insert_map
static
Nonzero for expressions which should be inserted on a specific edge.   

Referenced by alloc_pre_mem(), compute_pre_data(), free_pre_mem(), pre_edge_insert(), and prune_insertions_deletions().

◆ pre_ldst_mems

struct ls_expr* pre_ldst_mems = NULL
static

◆ pre_ldst_table

hash_table<pre_ldst_expr_hasher>* pre_ldst_table
static
Hashtable for the load/store memory refs.   

Referenced by compute_ld_motion_mems(), find_rtx_in_ldst(), free_ld_motion_mems(), ldst_entry(), and trim_ld_motion_mems().

◆ pre_optimal

sbitmap* pre_optimal
static
Nonzero for expressions where this block is an optimal computation
point.   

Referenced by alloc_pre_mem(), and free_pre_mem().

◆ pre_redundant

sbitmap* pre_redundant
static
Nonzero for expressions which are redundant in a particular block.   

Referenced by alloc_pre_mem(), and free_pre_mem().

◆ reg_avail_info

struct reg_avail_info* reg_avail_info
static

◆ reg_set_bitmap

regset reg_set_bitmap
static
Bitmap containing one bit for each register in the program.
Used when performing GCSE to track which registers have been set since
the start of the basic block.   

Referenced by alloc_gcse_mem(), and free_gcse_mem().

◆ test_insn

rtx_insn* test_insn
static
Used internally by can_assign_to_reg_without_clobbers_p.   

Referenced by can_assign_to_reg_without_clobbers_p(), can_reload_into(), and gcse_cc_finalize().

◆ transp