ipa-modref cleanup

Message ID 20211102212004.GB93151@kam.mff.cuni.cz
State New
Headers
Series ipa-modref cleanup |

Commit Message

Jan Hubicka Nov. 2, 2021, 9:20 p.m. UTC
  Hi,
this patch is a small refactoring of ipa-modref to make it a bit more
C++y by moving logic analyzing ssa name flags to a class
and I also moved the anonymous namespace markers so we do not
export unnecessary stuff.  There are no functional changes.

Bootstrapped/regtested x86_64-linux, will commit it shortly.

gcc/ChangeLog:

	* ipa-modref.c: Fix anonymous namespace placement.
	(class modref_eaf_analysis): New class.
	(analyze_ssa_name_flags): Turn to ...
	(modref_eaf_analysis::analyze_ssa_name): ... this one.
	(merge_call_lhs_flags): Turn to ...
	(modref_eaf_analysis::merge_call_lhs_flags): ... this one.
	(modref_eaf_analysis::merge_with_ssa_name): New member function.
	(record_escape_points): Turn to ...
	(modref_eaf_analysis::record_escape_points): ... this one.
	(analyze_parms): Update.
	(ipa_merge_modref_summary_after_inlining): Move to the end of file.
  

Comments

H.J. Lu Nov. 2, 2021, 10:32 p.m. UTC | #1
On Tue, Nov 2, 2021 at 2:21 PM Jan Hubicka via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> Hi,
> this patch is a small refactoring of ipa-modref to make it bit more
> C++y by moving logic analyzing ssa name flags to a class
> and I also moved the anonymous namespace markers so we do not
> export unnecessary stuff.  There are no functional changes.
>
> Bootstrapped/regtested x86_64-linux, will commit it shortly.
>
> gcc/ChangeLog:
>
>         * ipa-modref.c Fix anonymous namespace placement.
>         (class modref_eaf_analysis): New class.
>         (analyze_ssa_name_flags): Turn to ...
>         (modref_eaf_analysis::analyze_ssa_name): ... this one.
>         (merge_call_lhs_flags): Turn to ...
>         (modref_eaf_analysis::merge_call_lhs_flags): .. this one
>         (modref_eaf_analysis::merge_with_ssa_name): New member function.
>         (record_escape_points): Turn to ...
>         (modref_eaf_analysis::record_escape_points): ... this one.
>         (analyze_parms): Updat
>         (ipa_merge_modref_summary_after_inlining): Move to the end of file.
>

It broke GCC bootstrap:

https://gcc.gnu.org/pipermail/gcc-regression/2021-November/075676.html

In file included from ../../src-master/gcc/coretypes.h:474,
                 from ../../src-master/gcc/expmed.c:26:
In function ‘poly_uint16 mode_to_bytes(machine_mode)’,
    inlined from ‘typename if_nonpoly<typename
T::measurement_type>::type GET_MODE_SIZE(const T&) [with T =
scalar_int_mode]’ at ../../src-master/gcc/machmode.h:647:24,
    inlined from ‘rtx_def* emit_store_flag_1(rtx, rtx_code, rtx, rtx,
machine_mode, int, int, machine_mode)’ at
../../src-master/gcc/expmed.c:5723:56:
../../src-master/gcc/machmode.h:550:49: warning: ‘*(unsigned
int*)((char*)&int_mode + offsetof(scalar_int_mode,
scalar_int_mode::m_mode))’ may be used uninitialized in this function
[-Wmaybe-uninitialized]
  550 |           ? mode_size_inline (mode) : mode_size[mode]);
      |                                                 ^~~~
../../src-master/gcc/expmed.c: In function ‘rtx_def*
emit_store_flag_1(rtx, rtx_code, rtx, rtx, machine_mode, int, int,
machine_mode)’:
../../src-master/gcc/expmed.c:5652:19: note: ‘*(unsigned
int*)((char*)&int_mode + offsetof(scalar_int_mode,
scalar_int_mode::m_mode))’ was declared here
 5652 |   scalar_int_mode int_mode;
      |                   ^~~~~~~~
/export/gnu/import/git/gcc-test-master-intel64-native/bld/./prev-gcc/xg++
-B/export/gnu/import/git/gcc-test-master-intel64-native/bld/./prev-gcc/
-B/usr/12.0.0/x86_64-pc-linux-gnu/bin/ -nostdinc++
-B/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/src/.libs
-B/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/libsupc++/.libs
 -I/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/include/x86_64-pc-linux-gnu
 -I/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/include
 -I/export/gnu/import/git/gcc-test-master-intel64-native/src-master/libstdc++-v3/libsupc++
-L/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/src/.libs
-L/export/gnu/import/git/gcc-test-master-intel64-native/bld/prev-x86_64-pc-linux-gnu/libstdc++-v3/libsupc++/.libs
 -fno-PIE -c   -g -O2 -fno-checking -gtoggle -DIN_GCC
-fno-exceptions -fno-rtti -fasynchronous-unwind-tables -W -Wall
-Wno-narrowing -Wwrite-strings -Wcast-qual -Wno-error=format-diag
-Wmissing-format-attribute -Woverloaded-virtual -pedantic
-Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -Werror
-fno-common  -DHAVE_CONFIG_H -I. -I. -I../../src-master/gcc
-I../../src-master/gcc/. -I../../src-master/gcc/../include
-I../../src-master/gcc/../libcpp/include
-I../../src-master/gcc/../libcody
-I../../src-master/gcc/../libdecnumber
-I../../src-master/gcc/../libdecnumber/bid -I../libdecnumber
-I../../src-master/gcc/../libbacktrace   -o tree-eh.o -MT tree-eh.o
-MMD -MP -MF ./.deps/tree-eh.TPo ../../src-master/gcc/tree-eh.c
cc1plus: all warnings being treated as errors
make[6]: *** [Makefile:1138: ipa-modref.o] Error 1
  
Jan Hubicka Nov. 2, 2021, 11:04 p.m. UTC | #2
> It broke GCC bootstrap:
> 
> https://gcc.gnu.org/pipermail/gcc-regression/2021-November/075676.html
> 
> In file included from ../../src-master/gcc/coretypes.h:474,
>                  from ../../src-master/gcc/expmed.c:26:
> In function ‘poly_uint16 mode_to_bytes(machine_mode)’,
>     inlined from ‘typename if_nonpoly<typename
> T::measurement_type>::type GET_MODE_SIZE(const T&) [with T =
> scalar_int_mode]’ at ../../src-master/gcc/machmode.h:647:24,
>     inlined from ‘rtx_def* emit_store_flag_1(rtx, rtx_code, rtx, rtx,
> machine_mode, int, int, machine_mode)’ at
> ../../src-master/gcc/expmed.c:5723:56:
> ../../src-master/gcc/machmode.h:550:49: warning: ‘*(unsigned
> int*)((char*)&int_mode + offsetof(scalar_int_mode,
> scalar_int_mode::m_mode))’ may be used uninitialized in this function
> [-Wmaybe-uninitialized]
>   550 |           ? mode_size_inline (mode) : mode_size[mode]);
>       |                                                 ^~~~
> ../../src-master/gcc/expmed.c: In function ‘rtx_def*
> emit_store_flag_1(rtx, rtx_code, rtx, rtx, machine_mode, int, int,
> machine_mode)’:
> ../../src-master/gcc/expmed.c:5652:19: note: ‘*(unsigned
> int*)((char*)&int_mode + offsetof(scalar_int_mode,
> scalar_int_mode::m_mode))’ was declared here
>  5652 |   scalar_int_mode int_mode;
>       |                   ^~~~~~~~

I seem to be able to bootstrap normally, so perhaps it is related to
--with-arch=native.  I will try with that config.
However looking at the code
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2

the int_mode is indeed set conditionally and it is possible that this
may trigger a false positive.  I will however check again that we get
the same modref summaries before and after the patch.

There is PR103040 that shows a wrong-code bug in my earlier modref change
I am testing a fix for, but if that were the culprit it should have
reproduced earlier than with this commit.

Honza
  
Jakub Jelinek Nov. 2, 2021, 11:14 p.m. UTC | #3
On Wed, Nov 03, 2021 at 12:04:20AM +0100, Jan Hubicka via Gcc-patches wrote:
> > It broke GCC bootstrap:
> > 
> > https://gcc.gnu.org/pipermail/gcc-regression/2021-November/075676.html
> > 
> > In file included from ../../src-master/gcc/coretypes.h:474,
> >                  from ../../src-master/gcc/expmed.c:26:
> > In function ‘poly_uint16 mode_to_bytes(machine_mode)’,
> >     inlined from ‘typename if_nonpoly<typename
> > T::measurement_type>::type GET_MODE_SIZE(const T&) [with T =
> > scalar_int_mode]’ at ../../src-master/gcc/machmode.h:647:24,
> >     inlined from ‘rtx_def* emit_store_flag_1(rtx, rtx_code, rtx, rtx,
> > machine_mode, int, int, machine_mode)’ at
> > ../../src-master/gcc/expmed.c:5723:56:
> > ../../src-master/gcc/machmode.h:550:49: warning: ‘*(unsigned
> > int*)((char*)&int_mode + offsetof(scalar_int_mode,
> > scalar_int_mode::m_mode))’ may be used uninitialized in this function
> > [-Wmaybe-uninitialized]
> >   550 |           ? mode_size_inline (mode) : mode_size[mode]);
> >       |                                                 ^~~~
> > ../../src-master/gcc/expmed.c: In function ‘rtx_def*
> > emit_store_flag_1(rtx, rtx_code, rtx, rtx, machine_mode, int, int,
> > machine_mode)’:
> > ../../src-master/gcc/expmed.c:5652:19: note: ‘*(unsigned
> > int*)((char*)&int_mode + offsetof(scalar_int_mode,
> > scalar_int_mode::m_mode))’ was declared here
> >  5652 |   scalar_int_mode int_mode;
> >       |                   ^~~~~~~~
> 
> I seem to be able to bootstrap normally, so perhaps it is related to
> --with-arch=native.  I will try with that config.
> However looking at the code
>   scalar_int_mode int_mode;
>   if (is_int_mode (mode, &int_mode)
>       && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
> 
> the int_mode is indeed set conditionally and it is possible that this
> may trigger false positive.  I will however try again that we get same
> modref summaries before and after patch.

I'm pretty sure the above are just warnings and the error H.J. was most
likely seeing from the gcc-regression mail is the one fixed with
https://gcc.gnu.org/r12-4854-g1fefb6cf62b7411223a5459af40e6b98e3624535
I ran into the same issue...

	Jakub
  

Patch

diff --git a/gcc/ipa-modref.c b/gcc/ipa-modref.c
index bb513128633..f9eafc974d5 100644
--- a/gcc/ipa-modref.c
+++ b/gcc/ipa-modref.c
@@ -693,6 +693,8 @@  get_modref_function_summary (cgraph_node *func)
   return r;
 }
 
+namespace {
+
 /* Construct modref_access_node from REF.  */
 static modref_access_node
 get_access (ao_ref *ref)
@@ -1427,7 +1429,6 @@  deref_flags (int flags, bool ignore_stores)
   return ret;
 }
 
-namespace {
 
 /* Description of an escape point.  */
 
@@ -1645,22 +1646,62 @@  modref_lattice::merge_direct_store ()
   return merge (~(EAF_UNUSED | EAF_NOCLOBBER));
 }
 
-}  /* ANON namespace.  */
+/* Analyzer of EAF flags.  */
+
+class modref_eaf_analysis
+{
+public:
+  /* Compute flags for NAME.  */
+  void analyze_ssa_name (tree name);
+  /* Return flags computed earlier for NAME.  */
+  int get_ssa_name_flags (tree name)
+  {
+    int version = SSA_NAME_VERSION (name);
+    gcc_checking_assert (m_lattice[version].known);
+    return m_lattice[version].flags;
+  }
+  /* In IPA mode this will record all escape points
+     determined for NAME to PARM_IDNEX.  Flags are minimal
+     flags known.  */
+  void record_escape_points (tree name, int parm_index, int flags);
+  modref_eaf_analysis (bool ipa)
+  {
+    m_ipa = ipa;
+    m_depth = 0;
+    m_lattice.safe_grow_cleared (num_ssa_names, true);
+  }
+  ~modref_eaf_analysis ()
+  {
+    gcc_checking_assert (!m_depth);
+    if (m_ipa)
+      for (unsigned int i = 0; i < num_ssa_names; i++)
+	m_lattice[i].release ();
+  }
+private:
+  /* If true, we produce analysis for IPA mode.  In this case escape points ar
+     collected.  */
+  bool m_ipa;
+  /* Depth of recursion of analyze_ssa_name.  */
+  int m_depth;
+  /* Propagation lattice for individual ssa names.  */
+  auto_vec<modref_lattice> m_lattice;
+
+  void merge_with_ssa_name (tree dest, tree src, bool deref);
+  void merge_call_lhs_flags (gcall *call, int arg, tree name, bool deref);
+};
 
-static void analyze_ssa_name_flags (tree name,
-				    vec<modref_lattice> &lattice,
-				    int depth, bool ipa);
 
 /* Call statements may return their parameters.  Consider argument number
    ARG of USE_STMT and determine flags that can needs to be cleared
    in case pointer possibly indirectly references from ARG I is returned.
-   LATTICE, DEPTH and ipa are same as in analyze_ssa_name_flags.  */
+   LATTICE, DEPTH and ipa are same as in analyze_ssa_name.  */
 
-static void
-merge_call_lhs_flags (gcall *call, int arg, int index, bool deref,
-		      vec<modref_lattice> &lattice,
-		      int depth, bool ipa)
+void
+modref_eaf_analysis::merge_call_lhs_flags (gcall *call, int arg,
+					   tree name, bool deref)
 {
+  int index = SSA_NAME_VERSION (name);
+
   /* If there is no return value, no flags are affected.  */
   if (!gimple_call_lhs (call))
     return;
@@ -1681,17 +1722,14 @@  merge_call_lhs_flags (gcall *call, int arg, int index, bool deref,
   if (TREE_CODE (gimple_call_lhs (call)) == SSA_NAME)
     {
       tree lhs = gimple_call_lhs (call);
-      analyze_ssa_name_flags (lhs, lattice, depth + 1, ipa);
-      if (deref || (eaf_flags & EAF_NOT_RETURNED_DIRECTLY))
-	lattice[index].merge_deref (lattice[SSA_NAME_VERSION (lhs)], false);
-      else
-	lattice[index].merge (lattice[SSA_NAME_VERSION (lhs)]);
+      merge_with_ssa_name (name, lhs,
+			   (deref || (eaf_flags & EAF_NOT_RETURNED_DIRECTLY)));
     }
   /* In the case of memory store we can do nothing.  */
   else if (eaf_flags & EAF_NOT_RETURNED_DIRECTLY)
-    lattice[index].merge (deref_flags (0, false));
+    m_lattice[index].merge (deref_flags (0, false));
   else
-    lattice[index].merge (0);
+    m_lattice[index].merge (0);
 }
 
 /* Analyze EAF flags for SSA name NAME and store result to LATTICE.
@@ -1700,56 +1738,57 @@  merge_call_lhs_flags (gcall *call, int arg, int index, bool deref,
    If IPA is true we analyze for IPA propagation (and thus call escape points
    are processed later)  */
 
-static void
-analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
-			bool ipa)
+void
+modref_eaf_analysis::analyze_ssa_name (tree name)
 {
   imm_use_iterator ui;
   gimple *use_stmt;
   int index = SSA_NAME_VERSION (name);
 
   /* See if value is already computed.  */
-  if (lattice[index].known)
+  if (m_lattice[index].known)
    return;
-  if (lattice[index].open)
+  if (m_lattice[index].open)
     {
       if (dump_file)
 	fprintf (dump_file,
-		 "%*sGiving up on a cycle in SSA graph\n", depth * 4, "");
+		 "%*sGiving up on a cycle in SSA graph\n",
+		 m_depth * 4, "");
       return;
     }
-  if (depth == param_modref_max_depth)
+  if (m_depth == param_modref_max_depth)
     {
       if (dump_file)
 	fprintf (dump_file,
-		 "%*sGiving up on max depth\n", depth * 4, "");
+		 "%*sGiving up on max depth\n",
+		 m_depth * 4, "");
       return;
     }
   /* Recursion guard.  */
-  lattice[index].init ();
+  m_lattice[index].init ();
 
   if (dump_file)
     {
       fprintf (dump_file,
-	       "%*sAnalyzing flags of ssa name: ", depth * 4, "");
+	       "%*sAnalyzing flags of ssa name: ", m_depth * 4, "");
       print_generic_expr (dump_file, name);
       fprintf (dump_file, "\n");
     }
 
   FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
     {
-      if (lattice[index].flags == 0)
+      if (m_lattice[index].flags == 0)
 	break;
       if (is_gimple_debug (use_stmt))
 	continue;
       if (dump_file)
 	{
-	  fprintf (dump_file, "%*s  Analyzing stmt: ", depth * 4, "");
+	  fprintf (dump_file, "%*s  Analyzing stmt: ", m_depth * 4, "");
 	  print_gimple_stmt (dump_file, use_stmt, 0);
 	}
       /* If we see a direct non-debug use, clear unused bit.
 	 All dereferneces should be accounted below using deref_flags.  */
-      lattice[index].merge (~EAF_UNUSED);
+      m_lattice[index].merge (~EAF_UNUSED);
 
       /* Gimple return may load the return value.
 	 Returning name counts as an use by tree-ssa-structalias.c  */
@@ -1760,13 +1799,13 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 	      && DECL_BY_REFERENCE (DECL_RESULT (current_function_decl)))
 	    ;
 	  else if (gimple_return_retval (ret) == name)
-	    lattice[index].merge (~(EAF_UNUSED | EAF_NOT_RETURNED
-				    | EAF_NOT_RETURNED_DIRECTLY));
+	    m_lattice[index].merge (~(EAF_UNUSED | EAF_NOT_RETURNED
+				      | EAF_NOT_RETURNED_DIRECTLY));
 	  else if (memory_access_to (gimple_return_retval (ret), name))
 	    {
-	      lattice[index].merge_direct_load ();
-	      lattice[index].merge (~(EAF_UNUSED | EAF_NOT_RETURNED
-				      | EAF_NOT_RETURNED_DIRECTLY));
+	      m_lattice[index].merge_direct_load ();
+	      m_lattice[index].merge (~(EAF_UNUSED | EAF_NOT_RETURNED
+					| EAF_NOT_RETURNED_DIRECTLY));
 	    }
 	}
       /* Account for LHS store, arg loads and flags from callee function.  */
@@ -1780,12 +1819,12 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 	     is on since that would allow propagation of this from -fno-ipa-pta
 	     to -fipa-pta functions.  */
 	  if (gimple_call_fn (use_stmt) == name)
-	    lattice[index].merge (~(EAF_NOCLOBBER | EAF_UNUSED));
+	    m_lattice[index].merge (~(EAF_NOCLOBBER | EAF_UNUSED));
 
 	  /* Recursion would require bit of propagation; give up for now.  */
-	  if (callee && !ipa && recursive_call_p (current_function_decl,
+	  if (callee && !m_ipa && recursive_call_p (current_function_decl,
 						  callee))
-	    lattice[index].merge (0);
+	    m_lattice[index].merge (0);
 	  else
 	    {
 	      int ecf_flags = gimple_call_flags (call);
@@ -1798,25 +1837,26 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 	      if (gimple_call_lhs (call)
 		  && memory_access_to (gimple_call_lhs (call), name))
 		{
-		  lattice[index].merge_direct_store ();
+		  m_lattice[index].merge_direct_store ();
 		  /* Return slot optimization passes address of
 		     LHS to callee via hidden parameter and this
 		     may make LHS to escape.  See PR 98499.  */
 		  if (gimple_call_return_slot_opt_p (call)
 		      && TREE_ADDRESSABLE (TREE_TYPE (gimple_call_lhs (call))))
-		    lattice[index].merge (gimple_call_retslot_flags (call));
+		    m_lattice[index].merge (gimple_call_retslot_flags (call));
 		}
 
 	      if (gimple_call_chain (call)
 		  && (gimple_call_chain (call) == name))
-		lattice[index].merge (gimple_call_static_chain_flags (call));
+		m_lattice[index].merge (gimple_call_static_chain_flags (call));
 
 	      /* Process internal functions and right away.  */
-	      bool record_ipa = ipa && !gimple_call_internal_p (call);
+	      bool record_ipa = m_ipa && !gimple_call_internal_p (call);
 
 	      /* Handle all function parameters.  */
 	      for (unsigned i = 0;
-		   i < gimple_call_num_args (call) && lattice[index].flags; i++)
+		   i < gimple_call_num_args (call)
+		   && m_lattice[index].flags; i++)
 		/* Name is directly passed to the callee.  */
 		if (gimple_call_arg (call, i) == name)
 		  {
@@ -1829,20 +1869,19 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 			  call_flags |= ignore_stores_eaf_flags;
 
 			if (!record_ipa)
-			  lattice[index].merge (call_flags);
+			  m_lattice[index].merge (call_flags);
 			else
-			  lattice[index].add_escape_point (call, i,
+			  m_lattice[index].add_escape_point (call, i,
 							   call_flags, true);
 		      }
 		    if (!ignore_retval)
-		      merge_call_lhs_flags (call, i, index, false,
-					    lattice, depth, ipa);
+		      merge_call_lhs_flags (call, i, name, false);
 		  }
 		/* Name is dereferenced and passed to a callee.  */
 		else if (memory_access_to (gimple_call_arg (call, i), name))
 		  {
 		    if (ecf_flags & (ECF_CONST | ECF_NOVOPS))
-		      lattice[index].merge_direct_load ();
+		      m_lattice[index].merge_direct_load ();
 		    else
 		      {
 			int call_flags = deref_flags
@@ -1850,14 +1889,13 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 			    | EAF_NOT_RETURNED
 			    | EAF_NOT_RETURNED_DIRECTLY, ignore_stores);
 			if (!record_ipa)
-			  lattice[index].merge (call_flags);
+			  m_lattice[index].merge (call_flags);
 			else
-			  lattice[index].add_escape_point (call, i,
+			  m_lattice[index].add_escape_point (call, i,
 							   call_flags, false);
 		      }
 		    if (!ignore_retval)
-		      merge_call_lhs_flags (call, i, index, true,
-					    lattice, depth, ipa);
+		      merge_call_lhs_flags (call, i, name, true);
 		  }
 	    }
 	}
@@ -1872,18 +1910,16 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 		 We do not track memory locations, so assume that value
 		 is used arbitrarily.  */
 	      if (memory_access_to (gimple_assign_rhs1 (assign), name))
-		lattice[index].merge (deref_flags (0, false));
+		m_lattice[index].merge (deref_flags (0, false));
 	      /* Handle *name = *exp.  */
 	      else if (memory_access_to (gimple_assign_lhs (assign), name))
-		lattice[index].merge_direct_store ();
+		m_lattice[index].merge_direct_store ();
 	    }
 	  /* Handle lhs = *name.  */
 	  else if (memory_access_to (gimple_assign_rhs1 (assign), name))
 	    {
 	      tree lhs = gimple_assign_lhs (assign);
-	      analyze_ssa_name_flags (lhs, lattice, depth + 1, ipa);
-	      lattice[index].merge_deref (lattice[SSA_NAME_VERSION (lhs)],
-					  false);
+	      merge_with_ssa_name (name, lhs, true);
 	    }
 	}
       else if (gimple_store_p (use_stmt))
@@ -1895,8 +1931,8 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 	    {
 	      if (dump_file)
 		fprintf (dump_file, "%*s  ssa name saved to memory\n",
-			 depth * 4, "");
-	      lattice[index].merge (0);
+			 m_depth * 4, "");
+	      m_lattice[index].merge (0);
 	    }
 	  /* Handle *name = exp.  */
 	  else if (assign
@@ -1905,17 +1941,17 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 	      /* In general we can not ignore clobbers because they are
 		 barriers for code motion, however after inlining it is safe to
 		 do because local optimization passes do not consider clobbers
-		 from other functions.  Similar logic is in ipa-pure-const.c.  */
+		 from other functions.
+		 Similar logic is in ipa-pure-const.c.  */
 	      if (!cfun->after_inlining || !gimple_clobber_p (assign))
-		lattice[index].merge_direct_store ();
+		m_lattice[index].merge_direct_store ();
 	    }
 	  /* ASM statements etc.  */
 	  else if (!assign)
 	    {
 	      if (dump_file)
-		fprintf (dump_file, "%*s  Unhandled store\n",
-			 depth * 4, "");
-	      lattice[index].merge (0);
+		fprintf (dump_file, "%*s  Unhandled store\n", m_depth * 4, "");
+	      m_lattice[index].merge (0);
 	    }
 	}
       else if (gassign *assign = dyn_cast <gassign *> (use_stmt))
@@ -1930,15 +1966,13 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
 		  || gimple_assign_rhs1 (assign) == name))
 	    {
 	      tree lhs = gimple_assign_lhs (assign);
-	      analyze_ssa_name_flags (lhs, lattice, depth + 1, ipa);
-	      lattice[index].merge (lattice[SSA_NAME_VERSION (lhs)]);
+	      merge_with_ssa_name (name, lhs, false);
 	    }
 	}
       else if (gphi *phi = dyn_cast <gphi *> (use_stmt))
 	{
 	  tree result = gimple_phi_result (phi);
-	  analyze_ssa_name_flags (result, lattice, depth + 1, ipa);
-	  lattice[index].merge (lattice[SSA_NAME_VERSION (result)]);
+	  merge_with_ssa_name (name, result, false);
 	}
       /* Conditions are not considered escape points
 	 by tree-ssa-structalias.  */
@@ -1947,38 +1981,59 @@  analyze_ssa_name_flags (tree name, vec<modref_lattice> &lattice, int depth,
       else
 	{
 	  if (dump_file)
-	    fprintf (dump_file, "%*s  Unhandled stmt\n", depth * 4, "");
-	  lattice[index].merge (0);
+	    fprintf (dump_file, "%*s  Unhandled stmt\n", m_depth * 4, "");
+	  m_lattice[index].merge (0);
 	}
 
       if (dump_file)
 	{
-	  fprintf (dump_file, "%*s  current flags of ", depth * 4, "");
+	  fprintf (dump_file, "%*s  current flags of ", m_depth * 4, "");
 	  print_generic_expr (dump_file, name);
-	  lattice[index].dump (dump_file, depth * 4 + 4);
+	  m_lattice[index].dump (dump_file, m_depth * 4 + 4);
 	}
     }
   if (dump_file)
     {
-      fprintf (dump_file, "%*sflags of ssa name ", depth * 4, "");
+      fprintf (dump_file, "%*sflags of ssa name ", m_depth * 4, "");
       print_generic_expr (dump_file, name);
-      lattice[index].dump (dump_file, depth * 4 + 2);
+      m_lattice[index].dump (dump_file, m_depth * 4 + 2);
     }
-  lattice[index].open = false;
-  lattice[index].known = true;
+  m_lattice[index].open = false;
+  m_lattice[index].known = true;
+}
+
+/* Propagate info from SRC to DEST.  If DEREF it true, assume that SRC
+   is dereferenced.  */
+
+void
+modref_eaf_analysis::merge_with_ssa_name (tree dest, tree src, bool deref)
+{
+  int index = SSA_NAME_VERSION (dest);
+  int src_index = SSA_NAME_VERSION (src);
+
+  m_depth++;
+  analyze_ssa_name (src);
+  m_depth--;
+  if (deref)
+    m_lattice[index].merge_deref (m_lattice[src_index], false);
+  else
+    m_lattice[index].merge (m_lattice[src_index]);
 }
 
 /* Record escape points of PARM_INDEX according to LATTICE.  */
 
-static void
-record_escape_points (modref_lattice &lattice, int parm_index, int flags)
+void
+modref_eaf_analysis::record_escape_points (tree name, int parm_index, int flags)
 {
+  modref_lattice &lattice = m_lattice[SSA_NAME_VERSION (name)];
+
   if (lattice.escape_points.length ())
     {
       escape_point *ep;
       unsigned int ip;
       cgraph_node *node = cgraph_node::get (current_function_decl);
 
+      gcc_assert (m_ipa);
       FOR_EACH_VEC_ELT (lattice.escape_points, ip, ep)
 	if ((ep->min_flags & flags) != flags)
 	  {
@@ -2021,8 +2076,7 @@  analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
   if (!count && !retslot && !static_chain)
     return;
 
-  auto_vec<modref_lattice> lattice;
-  lattice.safe_grow_cleared (num_ssa_names, true);
+  modref_eaf_analysis eaf_analysis (ipa);
 
   for (tree parm = DECL_ARGUMENTS (current_function_decl); parm; parm_index++,
        parm = TREE_CHAIN (parm))
@@ -2048,8 +2102,8 @@  analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
 	    }
 	  continue;
 	}
-      analyze_ssa_name_flags (name, lattice, 0, ipa);
-      int flags = lattice[SSA_NAME_VERSION (name)].flags;
+      eaf_analysis.analyze_ssa_name (name);
+      int flags = eaf_analysis.get_ssa_name_flags (name);
 
       /* Eliminate useless flags so we do not end up storing unnecessary
 	 summaries.  */
@@ -2072,14 +2126,13 @@  analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
 		summary_lto->arg_flags.safe_grow_cleared (count, true);
 	      summary_lto->arg_flags[parm_index] = flags;
 	    }
-	  record_escape_points (lattice[SSA_NAME_VERSION (name)],
-				parm_index, flags);
+	  eaf_analysis.record_escape_points (name, parm_index, flags);
 	}
     }
   if (retslot)
     {
-      analyze_ssa_name_flags (retslot, lattice, 0, ipa);
-      int flags = lattice[SSA_NAME_VERSION (retslot)].flags;
+      eaf_analysis.analyze_ssa_name (retslot);
+      int flags = eaf_analysis.get_ssa_name_flags (retslot);
 
       flags = remove_useless_eaf_flags (flags, ecf_flags, false);
       if (flags)
@@ -2088,14 +2141,14 @@  analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
 	    summary->retslot_flags = flags;
 	  if (summary_lto)
 	    summary_lto->retslot_flags = flags;
-	  record_escape_points (lattice[SSA_NAME_VERSION (retslot)],
-				escape_point::retslot_arg, flags);
+	  eaf_analysis.record_escape_points (retslot,
+					     escape_point::retslot_arg, flags);
 	}
     }
   if (static_chain)
     {
-      analyze_ssa_name_flags (static_chain, lattice, 0, ipa);
-      int flags = lattice[SSA_NAME_VERSION (static_chain)].flags;
+      eaf_analysis.analyze_ssa_name (static_chain);
+      int flags = eaf_analysis.get_ssa_name_flags (static_chain);
 
       flags = remove_useless_eaf_flags (flags, ecf_flags, false);
       if (flags)
@@ -2104,13 +2157,11 @@  analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
 	    summary->static_chain_flags = flags;
 	  if (summary_lto)
 	    summary_lto->static_chain_flags = flags;
-	  record_escape_points (lattice[SSA_NAME_VERSION (static_chain)],
-				escape_point::static_chain_arg, flags);
+	  eaf_analysis.record_escape_points (static_chain,
+					     escape_point::static_chain_arg,
+					     flags);
 	}
     }
-  if (ipa)
-    for (unsigned int i = 0; i < num_ssa_names; i++)
-      lattice[i].release ();
 }
 
 /* Analyze function F.  IPA indicates whether we're running in local mode
@@ -2322,6 +2373,8 @@  modref_generate (void)
     }
 }
 
+}  /* ANON namespace.  */
+
 /* Called when a new function is inserted to callgraph late.  */
 
 void
@@ -2509,7 +2562,7 @@  write_modref_records (modref_records_lto *tt, struct output_block *ob)
    Either nolto_ret or lto_ret is initialized by the tree depending whether
    LTO streaming is expected or not.  */
 
-void
+static void
 read_modref_records (lto_input_block *ib, struct data_in *data_in,
 		     modref_records **nolto_ret,
 		     modref_records_lto **lto_ret)
@@ -3100,6 +3153,8 @@  make_pass_ipa_modref (gcc::context *ctxt)
   return new pass_ipa_modref (ctxt);
 }
 
+namespace {
+
 /* Skip edges from and to nodes without ipa_pure_const enabled.
    Ignore not available symbols.  */
 
@@ -3279,184 +3334,6 @@  update_escape_summary (cgraph_node *node,
     }
 }
 
-/* Call EDGE was inlined; merge summary from callee to the caller.  */
-
-void
-ipa_merge_modref_summary_after_inlining (cgraph_edge *edge)
-{
-  if (!summaries && !summaries_lto)
-    return;
-
-  struct cgraph_node *to = (edge->caller->inlined_to
-			    ? edge->caller->inlined_to : edge->caller);
-  class modref_summary *to_info = summaries ? summaries->get (to) : NULL;
-  class modref_summary_lto *to_info_lto = summaries_lto
-					  ? summaries_lto->get (to) : NULL;
-
-  if (!to_info && !to_info_lto)
-    {
-      if (summaries)
-	summaries->remove (edge->callee);
-      if (summaries_lto)
-	summaries_lto->remove (edge->callee);
-      remove_modref_edge_summaries (edge->callee);
-      return;
-    }
-
-  class modref_summary *callee_info = summaries ? summaries->get (edge->callee)
-				      : NULL;
-  class modref_summary_lto *callee_info_lto
-		 = summaries_lto ? summaries_lto->get (edge->callee) : NULL;
-  int flags = flags_from_decl_or_type (edge->callee->decl);
-  bool ignore_stores = ignore_stores_p (edge->caller->decl, flags);
-
-  if (!callee_info && to_info)
-    {
-      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
-	to_info->loads->collapse ();
-      if (!ignore_stores)
-	to_info->stores->collapse ();
-    }
-  if (!callee_info_lto && to_info_lto)
-    {
-      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
-	to_info_lto->loads->collapse ();
-      if (!ignore_stores)
-	to_info_lto->stores->collapse ();
-    }
-  if (callee_info || callee_info_lto)
-    {
-      auto_vec <modref_parm_map, 32> parm_map;
-
-      compute_parm_map (edge, &parm_map);
-
-      if (!ignore_stores)
-	{
-	  if (to_info && callee_info)
-	    to_info->stores->merge (callee_info->stores, &parm_map, false);
-	  if (to_info_lto && callee_info_lto)
-	    to_info_lto->stores->merge (callee_info_lto->stores, &parm_map,
-					false);
-	}
-      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
-	{
-	  if (to_info && callee_info)
-	    to_info->loads->merge (callee_info->loads, &parm_map, false);
-	  if (to_info_lto && callee_info_lto)
-	    to_info_lto->loads->merge (callee_info_lto->loads, &parm_map,
-				       false);
-	}
-    }
-
-  /* Now merge escape summaries.
-     For every escape to the callee we need to merge calle flags
-     and remap calees escapes.  */
-  class escape_summary *sum = escape_summaries->get (edge);
-  int max_escape = -1;
-  escape_entry *ee;
-  unsigned int i;
-
-  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
-    FOR_EACH_VEC_ELT (sum->esc, i, ee)
-      if ((int)ee->arg > max_escape)
-	max_escape = ee->arg;
-
-  auto_vec <vec <struct escape_map>, 32> emap (max_escape + 1);
-  emap.safe_grow (max_escape + 1, true);
-  for (i = 0; (int)i < max_escape + 1; i++)
-    emap[i] = vNULL;
-
-  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
-    FOR_EACH_VEC_ELT (sum->esc, i, ee)
-      {
-	bool needed = false;
-	/* TODO: We do not have jump functions for return slots, so we
-	   never propagate them to outer function.  */
-	if (ee->parm_index < 0)
-	  continue;
-	if (to_info && (int)to_info->arg_flags.length () > ee->parm_index)
-	  {
-	    int flags = callee_info
-			&& callee_info->arg_flags.length () > ee->arg
-			? callee_info->arg_flags[ee->arg] : 0;
-	    if (!ee->direct)
-	      flags = deref_flags (flags, ignore_stores);
-	    else if (ignore_stores)
-	      flags |= ignore_stores_eaf_flags;
-	    flags |= ee->min_flags;
-	    to_info->arg_flags[ee->parm_index] &= flags;
-	    if (to_info->arg_flags[ee->parm_index])
-	      needed = true;
-	  }
-	if (to_info_lto && (int)to_info_lto->arg_flags.length () > ee->parm_index)
-	  {
-	    int flags = callee_info_lto
-			&& callee_info_lto->arg_flags.length () > ee->arg
-			? callee_info_lto->arg_flags[ee->arg] : 0;
-	    if (!ee->direct)
-	      flags = deref_flags (flags, ignore_stores);
-	    else if (ignore_stores)
-	      flags |= ignore_stores_eaf_flags;
-	    flags |= ee->min_flags;
-	    to_info_lto->arg_flags[ee->parm_index] &= flags;
-	    if (to_info_lto->arg_flags[ee->parm_index])
-	      needed = true;
-	  }
-	struct escape_map entry = {ee->parm_index, ee->direct};
-	if (needed)
-	  emap[ee->arg].safe_push (entry);
-      }
-  update_escape_summary (edge->callee, emap, ignore_stores);
-  for (i = 0; (int)i < max_escape + 1; i++)
-    emap[i].release ();
-  if (sum)
-    escape_summaries->remove (edge);
-
-  if (summaries)
-    {
-      if (to_info && !to_info->useful_p (flags))
-	{
-	  if (dump_file)
-	    fprintf (dump_file, "Removed mod-ref summary for %s\n",
-		     to->dump_name ());
-	  summaries->remove (to);
-	  to_info = NULL;
-	}
-      else if (to_info && dump_file)
-	{
-	  if (dump_file)
-	    fprintf (dump_file, "Updated mod-ref summary for %s\n",
-		     to->dump_name ());
-	  to_info->dump (dump_file);
-	}
-      if (callee_info)
-	summaries->remove (edge->callee);
-    }
-  if (summaries_lto)
-    {
-      if (to_info_lto && !to_info_lto->useful_p (flags))
-	{
-	  if (dump_file)
-	    fprintf (dump_file, "Removed mod-ref summary for %s\n",
-		     to->dump_name ());
-	  summaries_lto->remove (to);
-	}
-      else if (to_info_lto && dump_file)
-	{
-	  if (dump_file)
-	    fprintf (dump_file, "Updated mod-ref summary for %s\n",
-		     to->dump_name ());
-	  to_info_lto->dump (dump_file);
-	  to_info_lto = NULL;
-	}
-      if (callee_info_lto)
-	summaries_lto->remove (edge->callee);
-    }
-  if (!to_info && !to_info_lto)
-    remove_modref_edge_summaries (to);
-  return;
-}
-
 /* Get parameter type from DECL.  This is only safe for special cases
    like builtins we create fnspec for because the type match is checked
    at fnspec creation time.  */
@@ -4079,6 +3956,187 @@  modref_propagate_flags_in_scc (cgraph_node *component_node)
 	     "Propagation of flags finished in %i iterations\n", iteration);
 }
 
+}  /* ANON namespace.  */
+
+/* Call EDGE was inlined; merge summary from callee to the caller.  */
+
+void
+ipa_merge_modref_summary_after_inlining (cgraph_edge *edge)
+{
+  if (!summaries && !summaries_lto)
+    return;
+
+  struct cgraph_node *to = (edge->caller->inlined_to
+			    ? edge->caller->inlined_to : edge->caller);
+  class modref_summary *to_info = summaries ? summaries->get (to) : NULL;
+  class modref_summary_lto *to_info_lto = summaries_lto
+					  ? summaries_lto->get (to) : NULL;
+
+  if (!to_info && !to_info_lto)
+    {
+      if (summaries)
+	summaries->remove (edge->callee);
+      if (summaries_lto)
+	summaries_lto->remove (edge->callee);
+      remove_modref_edge_summaries (edge->callee);
+      return;
+    }
+
+  class modref_summary *callee_info = summaries ? summaries->get (edge->callee)
+				      : NULL;
+  class modref_summary_lto *callee_info_lto
+		 = summaries_lto ? summaries_lto->get (edge->callee) : NULL;
+  int flags = flags_from_decl_or_type (edge->callee->decl);
+  bool ignore_stores = ignore_stores_p (edge->caller->decl, flags);
+
+  if (!callee_info && to_info)
+    {
+      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
+	to_info->loads->collapse ();
+      if (!ignore_stores)
+	to_info->stores->collapse ();
+    }
+  if (!callee_info_lto && to_info_lto)
+    {
+      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
+	to_info_lto->loads->collapse ();
+      if (!ignore_stores)
+	to_info_lto->stores->collapse ();
+    }
+  if (callee_info || callee_info_lto)
+    {
+      auto_vec <modref_parm_map, 32> parm_map;
+
+      compute_parm_map (edge, &parm_map);
+
+      if (!ignore_stores)
+	{
+	  if (to_info && callee_info)
+	    to_info->stores->merge (callee_info->stores, &parm_map, false);
+	  if (to_info_lto && callee_info_lto)
+	    to_info_lto->stores->merge (callee_info_lto->stores, &parm_map,
+					false);
+	}
+      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
+	{
+	  if (to_info && callee_info)
+	    to_info->loads->merge (callee_info->loads, &parm_map, false);
+	  if (to_info_lto && callee_info_lto)
+	    to_info_lto->loads->merge (callee_info_lto->loads, &parm_map,
+				       false);
+	}
+    }
+
+  /* Now merge escape summaries.
+     For every escape to the callee we need to merge callee flags
+     and remap the callee's escapes.  */
+  class escape_summary *sum = escape_summaries->get (edge);
+  int max_escape = -1;
+  escape_entry *ee;
+  unsigned int i;
+
+  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
+    FOR_EACH_VEC_ELT (sum->esc, i, ee)
+      if ((int)ee->arg > max_escape)
+	max_escape = ee->arg;
+
+  auto_vec <vec <struct escape_map>, 32> emap (max_escape + 1);
+  emap.safe_grow (max_escape + 1, true);
+  for (i = 0; (int)i < max_escape + 1; i++)
+    emap[i] = vNULL;
+
+  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
+    FOR_EACH_VEC_ELT (sum->esc, i, ee)
+      {
+	bool needed = false;
+	/* TODO: We do not have jump functions for return slots, so we
+	   never propagate them to the outer function.  */
+	if (ee->parm_index < 0)
+	  continue;
+	if (to_info && (int)to_info->arg_flags.length () > ee->parm_index)
+	  {
+	    int flags = callee_info
+			&& callee_info->arg_flags.length () > ee->arg
+			? callee_info->arg_flags[ee->arg] : 0;
+	    if (!ee->direct)
+	      flags = deref_flags (flags, ignore_stores);
+	    else if (ignore_stores)
+	      flags |= ignore_stores_eaf_flags;
+	    flags |= ee->min_flags;
+	    to_info->arg_flags[ee->parm_index] &= flags;
+	    if (to_info->arg_flags[ee->parm_index])
+	      needed = true;
+	  }
+	if (to_info_lto
+	    && (int)to_info_lto->arg_flags.length () > ee->parm_index)
+	  {
+	    int flags = callee_info_lto
+			&& callee_info_lto->arg_flags.length () > ee->arg
+			? callee_info_lto->arg_flags[ee->arg] : 0;
+	    if (!ee->direct)
+	      flags = deref_flags (flags, ignore_stores);
+	    else if (ignore_stores)
+	      flags |= ignore_stores_eaf_flags;
+	    flags |= ee->min_flags;
+	    to_info_lto->arg_flags[ee->parm_index] &= flags;
+	    if (to_info_lto->arg_flags[ee->parm_index])
+	      needed = true;
+	  }
+	struct escape_map entry = {ee->parm_index, ee->direct};
+	if (needed)
+	  emap[ee->arg].safe_push (entry);
+      }
+  update_escape_summary (edge->callee, emap, ignore_stores);
+  for (i = 0; (int)i < max_escape + 1; i++)
+    emap[i].release ();
+  if (sum)
+    escape_summaries->remove (edge);
+
+  if (summaries)
+    {
+      if (to_info && !to_info->useful_p (flags))
+	{
+	  if (dump_file)
+	    fprintf (dump_file, "Removed mod-ref summary for %s\n",
+		     to->dump_name ());
+	  summaries->remove (to);
+	  to_info = NULL;
+	}
+      else if (to_info && dump_file)
+	{
+	  if (dump_file)
+	    fprintf (dump_file, "Updated mod-ref summary for %s\n",
+		     to->dump_name ());
+	  to_info->dump (dump_file);
+	}
+      if (callee_info)
+	summaries->remove (edge->callee);
+    }
+  if (summaries_lto)
+    {
+      if (to_info_lto && !to_info_lto->useful_p (flags))
+	{
+	  if (dump_file)
+	    fprintf (dump_file, "Removed mod-ref summary for %s\n",
+		     to->dump_name ());
+	  summaries_lto->remove (to);
+	}
+      else if (to_info_lto && dump_file)
+	{
+	  if (dump_file)
+	    fprintf (dump_file, "Updated mod-ref summary for %s\n",
+		     to->dump_name ());
+	  to_info_lto->dump (dump_file);
+	  to_info_lto = NULL;
+	}
+      if (callee_info_lto)
+	summaries_lto->remove (edge->callee);
+    }
+  if (!to_info && !to_info_lto)
+    remove_modref_edge_summaries (to);
+  return;
+}
+
 /* Run the IPA pass.  This will take a function's summaries and calls and
    construct new summaries which represent a transitive closure.  So that
    summary of an analyzed function contains information about the loads and