[v4,3/6] tree-object-size: Support dynamic sizes in conditions

Message ID 20211201142757.4086840-4-siddhesh@gotplt.org
State Superseded
Series __builtin_dynamic_object_size

Commit Message

Siddhesh Poyarekar Dec. 1, 2021, 2:27 p.m. UTC
  Handle GIMPLE_PHI and conditionals specially for dynamic object sizes,
returning PHI/conditional expressions instead of a constant MIN/MAX
estimate.

This makes the returned object size variable for loops and conditionals,
so tests need to be adjusted to look for the precise size in some cases.
builtin-dynamic-object-size-5.c had to be modified to only check for
success in the maximum object size case and to skip the minimum object
size tests, because the result is no longer a compile-time constant.

I also added some simple tests to exercise conditionals with dynamic
object sizes.
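
For example (condensed from the new tests), given:

  void *p = cond ? __builtin_malloc (32) : __builtin_malloc (64);

__builtin_object_size (p, 0) folds to the constant maximum estimate 64,
whereas __builtin_dynamic_object_size (p, 0) now evaluates at run time
to 32 or 64 depending on cond.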

gcc/ChangeLog:

	* builtins.c (fold_builtin_object_size): Adjust for dynamic size
	expressions.
	* tree-object-size.c: Include gimplify-me.h.
	(struct object_size_info): New member UNKNOWNS.
	(size_initval_p, object_sizes_get_raw): New functions.
	(object_sizes_get): Return suitable gimple variable for
	object size.
	(bundle_sizes): New function.
	(object_sizes_set): Use it and handle dynamic object size
	expressions.
	(object_sizes_set_temp): New function.
	(size_for_offset): Adjust for dynamic size expressions.
	(emit_phi_nodes, propagate_unknowns, gimplify_size_expressions):
	New functions.
	(compute_builtin_object_size): Call gimplify_size_expressions
	for OST_DYNAMIC.
	(dynamic_object_size): New function.
	(cond_expr_object_size): Use it.
	(phi_dynamic_object_size): New function.
	(collect_object_sizes_for): Call it for OST_DYNAMIC.  Adjust to
	accommodate dynamic object sizes.

gcc/testsuite/ChangeLog:

	* gcc.dg/builtin-dynamic-object-size-0.c: New tests.
	* gcc.dg/builtin-dynamic-object-size-10.c: Add comment.
	* gcc.dg/builtin-object-size-1.c [__builtin_object_size]: Expect
	exact size expressions for __builtin_dynamic_object_size.
	* gcc.dg/builtin-object-size-2.c [__builtin_object_size]:
	Likewise.
	* gcc.dg/builtin-object-size-3.c [__builtin_object_size]:
	Likewise.
	* gcc.dg/builtin-object-size-4.c [__builtin_object_size]:
	Likewise.
	* gcc.dg/builtin-object-size-5.c [__builtin_object_size]:
	Likewise.

Signed-off-by: Siddhesh Poyarekar <siddhesh@gotplt.org>
---
 gcc/builtins.c                                |   6 +-
 .../gcc.dg/builtin-dynamic-object-size-0.c    |  72 +++
 .../gcc.dg/builtin-dynamic-object-size-10.c   |   2 +
 gcc/testsuite/gcc.dg/builtin-object-size-1.c  | 119 ++++-
 gcc/testsuite/gcc.dg/builtin-object-size-2.c  |  92 ++++
 gcc/testsuite/gcc.dg/builtin-object-size-3.c  | 121 +++++
 gcc/testsuite/gcc.dg/builtin-object-size-4.c  |  78 +++
 gcc/testsuite/gcc.dg/builtin-object-size-5.c  |  12 +
 gcc/tree-object-size.c                        | 489 +++++++++++++++++-
 9 files changed, 962 insertions(+), 29 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c
  

Comments

Jakub Jelinek Dec. 15, 2021, 4:24 p.m. UTC | #1
On Wed, Dec 01, 2021 at 07:57:54PM +0530, Siddhesh Poyarekar wrote:
> --- /dev/null
> +++ b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c
> @@ -0,0 +1,72 @@
> +/* { dg-do run } */
> +/* { dg-options "-O2" } */
> +
> +typedef __SIZE_TYPE__ size_t;
> +#define abort __builtin_abort
> +
> +size_t
> +__attribute__ ((noinline))
> +test_builtin_malloc_condphi (int cond)
> +{
> +  void *ret;
> + 
> +  if (cond)
> +    ret = __builtin_malloc (32);
> +  else
> +    ret = __builtin_malloc (64);
> +
> +  return __builtin_dynamic_object_size (ret, 0);
> +}
> +
> +size_t
> +__attribute__ ((noinline))
> +test_builtin_calloc_condphi (size_t cnt, size_t sz, int cond)
> +{
> +  struct
> +    {
> +      int a;
> +      char b;
> +    } bin[cnt];
> +
> +  char *ch = __builtin_calloc (cnt, sz);
> +
> +  return __builtin_dynamic_object_size (cond ? ch : (void *) &bin, 0);

I think it would be nice if the testcases didn't leak memory; can
you replace return ... with size_t ret = 
and add
  __builtin_free (ch);
  return ret;
in both cases (in the first, perhaps rename ret to ch first).
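
E.g. for the first one, something along these lines (untested):

  size_t
  __attribute__ ((noinline))
  test_builtin_malloc_condphi (int cond)
  {
    void *ch;

    if (cond)
      ch = __builtin_malloc (32);
    else
      ch = __builtin_malloc (64);

    size_t ret = __builtin_dynamic_object_size (ch, 0);
    __builtin_free (ch);
    return ret;
  }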

> --- a/gcc/testsuite/gcc.dg/builtin-object-size-5.c
> +++ b/gcc/testsuite/gcc.dg/builtin-object-size-5.c
> @@ -1,5 +1,7 @@
>  /* { dg-do compile { target i?86-*-linux* i?86-*-gnu* x86_64-*-linux* } } */
>  /* { dg-options "-O2" } */
> +/* For dynamic object sizes we 'succeed' if the returned size is known for
> +   maximum object size.  */
>  
>  typedef __SIZE_TYPE__ size_t;
>  extern void abort (void);
> @@ -13,7 +15,11 @@ test1 (size_t x)
>  
>    for (i = 0; i < x; ++i)
>      p = p + 4;
> +#ifdef __builtin_object_size
> +  if (__builtin_object_size (p, 0) == -1)
> +#else
>    if (__builtin_object_size (p, 0) != sizeof (buf) - 8)
> +#endif
>      abort ();
>  }
>  
> @@ -25,10 +31,15 @@ test2 (size_t x)
>  
>    for (i = 0; i < x; ++i)
>      p = p + 4;
> +#ifdef __builtin_object_size
> +  if (__builtin_object_size (p, 1) == -1)
> +#else
>    if (__builtin_object_size (p, 1) != sizeof (buf) - 8)
> +#endif
>      abort ();
>  }

I'd say for __bdos it would be better to rewrite the testcase
as dg-do run, perhaps use a somewhat smaller buffer (say 16 times smaller)
and dg-additional-sources for a file that actually defines that buffer
and main.  Perhaps you can have
#ifdef __builtin_object_size
  if (__builtin_object_size (p, 0) != sizeof (buf) - 8 - 4 * x)
#else
in there; just in the wrapper that #defines __builtin_object_size,
make it dg-do run and have dg-additional-sources (and
#ifndef N
#define N 0x40000000
#endif
and use that as the size of buf).
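
I.e. roughly something like this (untested, the -main.c file name is just
for illustration); in builtin-object-size-5.c:

  #ifndef N
  #define N 0x40000000
  #endif
  extern char buf[N];

and in the builtin-dynamic-object-size-5.c wrapper:

  /* { dg-do run } */
  /* { dg-options "-O2" } */
  /* { dg-additional-sources "builtin-dynamic-object-size-5-main.c" } */
  #define N 0x4000000
  #define __builtin_object_size __builtin_dynamic_object_size
  #include "builtin-object-size-5.c"

with the hypothetical builtin-dynamic-object-size-5-main.c defining a
char buf[] of that size and a main that calls the test* functions.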

> +	gcc_checking_assert (is_gimple_variable (ret)

This should be TREE_CODE (ret) == SSA_NAME
The reason why is_gimple_variable accepts VAR_DECLs/PARM_DECLs/RESULT_DECLs
is high vs. low gimple, but size_type_node sizes are gimple types and
both objsz passes are run when in ssa form, so it should always be either
a SSA_NAME or INTEGER_CST.

> +			     || TREE_CODE (ret) == INTEGER_CST);
> +    }
> +
> +  return ret;
>  }
>  
>  /* Set size for VARNO corresponding to OSI to VAL.  */
> @@ -176,27 +218,113 @@ object_sizes_initialize (struct object_size_info *osi, unsigned varno,
>    object_sizes[object_size_type][varno].wholesize = wholeval;
>  }
>  
> +/* Return a MODIFY_EXPR for cases where SSA and EXPR have the same type.  The
> +   TREE_VEC is returned only in case of PHI nodes.  */
> +
> +static tree
> +bundle_sizes (tree ssa, tree expr)
> +{
> +  gcc_checking_assert (TREE_TYPE (ssa) == sizetype);
> +
> +  if (!TREE_TYPE (expr))
> +    {
> +      gcc_checking_assert (TREE_CODE (expr) == TREE_VEC);

I think I'd prefer to do it the other way, condition on TREE_CODE (expr) == TREE_VEC
and if needed assert it has NULL TREE_TYPE.

> +      TREE_VEC_ELT (expr, TREE_VEC_LENGTH (expr) - 1) = ssa;
> +      return expr;
> +    }
> +
> +  gcc_checking_assert (types_compatible_p (TREE_TYPE (expr), sizetype));
> +  return size_binop (MODIFY_EXPR, ssa, expr);

This looks wrong.  MODIFY_EXPR isn't a binary expression
(tcc_binary/tcc_comparison), size_binop shouldn't be called on it.
I think you even don't want to fold it, so
  return build2 (MODIFY_EXPR, sizetype, ssa, expr);
?
Also, calling a parameter or var ssa is quite unusual; normally
one calls an SSA_NAME either name, or ssa_name, etc.
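
I.e. with all three of the above, something like (untested):

  static tree
  bundle_sizes (tree name, tree expr)
  {
    gcc_checking_assert (TREE_TYPE (name) == sizetype);

    if (TREE_CODE (expr) == TREE_VEC)
      {
        /* PHI case: the result slot is the last element of the vector.  */
        TREE_VEC_ELT (expr, TREE_VEC_LENGTH (expr) - 1) = name;
        return expr;
      }

    gcc_checking_assert (types_compatible_p (TREE_TYPE (expr), sizetype));
    return build2 (MODIFY_EXPR, sizetype, name, expr);
  }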

> +	  gcc_checking_assert (size_initval_p (oldval, object_size_type));
> +	  gcc_checking_assert (size_initval_p (old_wholeval,
> +					       object_size_type));
> +	  /* For dynamic object sizes, all object sizes that are not gimple
> +	     variables will need to be gimplified.  */
> +	  if (TREE_CODE (wholeval) != INTEGER_CST
> +	      && !is_gimple_variable (wholeval))
> +	    {
> +	      bitmap_set_bit (osi->reexamine, varno);
> +	      wholeval = bundle_sizes (make_ssa_name (sizetype), wholeval);
> +	    }
> +	  if (TREE_CODE (val) != INTEGER_CST && !is_gimple_variable (val))

Again twice above.

> +/* Set temporary SSA names for object size and whole size to resolve dependency
> +   loops in dynamic size computation.  */
> +
> +static inline void
> +object_sizes_set_temp (struct object_size_info *osi, unsigned varno)
> +{
> +  tree val = object_sizes_get (osi, varno);
> +
> +  if (size_initval_p (val, osi->object_size_type))
> +    object_sizes_set (osi, varno,
> +		      make_ssa_name (sizetype),
> +		      make_ssa_name (sizetype));

This makes me a little bit worried.  Do you compute the wholesize SSA_NAME
at runtime always, or only when it is really needed and known not to always
be equal to the size?
I mean, e.g. for the cases where there is just const char *p = malloc (size);
and the pointer is never increased size == wholesize.  For __bos it will
just be 2 different INTEGER_CSTs, but if it would at runtime mean we compute
something twice and hope we eventually find out during later passes
it is the same, it would be bad.

> +  tree phires = TREE_VEC_ELT (wholesize, TREE_VEC_LENGTH (wholesize) - 1);
> +  gphi *wholephi = create_phi_node (phires, gimple_bb (stmt));
> +  phires = TREE_VEC_ELT (size, TREE_VEC_LENGTH (size) - 1);
> +  gphi *phi = create_phi_node (phires, gimple_bb (stmt));
> +  gphi *obj_phi =  as_a <gphi *> (stmt);

Just one space instead of 2 above.
And the above shows that you actually create 2 PHIs unconditionally,
rather than trying to do that only if 1) wholesize is ever actually
different from size 2) something needs wholesize.

> +      /* If we built an expression, we will need to build statements
> +	 and insert them on the edge right away.  */
> +      if (!is_gimple_variable (wsz))

Again, above comments about is_gimple_variable.

> +	wsz = force_gimple_operand (wsz, &seq, true, NULL);
> +      if (!is_gimple_variable (sz))
> +	{
> +	  gimple_seq s;
> +	  sz = force_gimple_operand (sz, &s, true, NULL);
> +	  gimple_seq_add_seq (&seq, s);
> +	}
> +
> +      if (seq)
> +	{
> +	  edge e = gimple_phi_arg_edge (obj_phi, i);
> +
> +	  /* Put the size definition before the last statement of the source
> +	     block of the PHI edge.  This ensures that any branches at the end
> +	     of the source block remain the last statement.  We are OK even if
> +	     the last statement is the definition of the object since it will
> +	     succeed any definitions that contribute to its size and the size
> +	     expression will succeed them too.  */
> +	  gimple_stmt_iterator gsi = gsi_last_bb (e->src);
> +	  gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);

This looks problematic.  The last stmt in the bb might not exist at all,
or can be one that needs to terminate the bb (stmt_ends_bb_p), or can be
some other normal stmt that is just last in the bb, or it can be a debug
stmt.  E.g. for -fcompare-debug sanity, inserting before the last stmt
in the block is wrong, because without -g it could be some random stmt and
with -g it can be a debug stmt, so the resulting stmts will then differ
between -g and -g0.  Or the last stmt could actually be computing something
you use in the expressions?
I think generally you want gsi_insert_seq_on_edge, just be prepared that it
doesn't always work - one can't insert on EH or ABNORMAL edges.
For EH/ABNORMAL edges I'm not really sure what can be done: punt, force just
__bos behavior for it, or perhaps insert before the last stmt in that case,
but beware that it would need to be SSA_NAME_OCCURS_IN_ABNORMAL_PHI
SSA_NAME which I think needs to have underlying SSA_NAME_VAR and needs to
follow rules such that out of SSA can handle it.
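
I.e. roughly (with the above caveat; punting on EH/ABNORMAL edges is shown
only as a placeholder):

  edge e = gimple_phi_arg_edge (obj_phi, i);

  if (e->flags & (EDGE_EH | EDGE_ABNORMAL))
    ;  /* Punt, or fall back to the __bos constant here.  */
  else
    gsi_insert_seq_on_edge (e, seq);

followed by a gsi_commit_edge_inserts () once all the PHI arguments have
been processed.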

	Jakub
  
Siddhesh Poyarekar Dec. 15, 2021, 5:56 p.m. UTC | #2
On 12/15/21 21:54, Jakub Jelinek wrote:
> On Wed, Dec 01, 2021 at 07:57:54PM +0530, Siddhesh Poyarekar wrote:
>> --- /dev/null
>> +++ b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c
>> @@ -0,0 +1,72 @@
>> +/* { dg-do run } */
>> +/* { dg-options "-O2" } */
>> +
>> +typedef __SIZE_TYPE__ size_t;
>> +#define abort __builtin_abort
>> +
>> +size_t
>> +__attribute__ ((noinline))
>> +test_builtin_malloc_condphi (int cond)
>> +{
>> +  void *ret;
>> +
>> +  if (cond)
>> +    ret = __builtin_malloc (32);
>> +  else
>> +    ret = __builtin_malloc (64);
>> +
>> +  return __builtin_dynamic_object_size (ret, 0);
>> +}
>> +
>> +size_t
>> +__attribute__ ((noinline))
>> +test_builtin_calloc_condphi (size_t cnt, size_t sz, int cond)
>> +{
>> +  struct
>> +    {
>> +      int a;
>> +      char b;
>> +    } bin[cnt];
>> +
>> +  char *ch = __builtin_calloc (cnt, sz);
>> +
>> +  return __builtin_dynamic_object_size (cond ? ch : (void *) &bin, 0);
> 
> I think it would be nice if the testcases didn't leak memory, can
> you replace return ... with size_t ret =
> and add
>    __builtin_free (ch);
>    return ret;
> in both cases (in the first perhaps rename ret to ch first.
> 

OK, I'll fix up all patches for this.

>> --- a/gcc/testsuite/gcc.dg/builtin-object-size-5.c
>> +++ b/gcc/testsuite/gcc.dg/builtin-object-size-5.c
>> @@ -1,5 +1,7 @@
>>   /* { dg-do compile { target i?86-*-linux* i?86-*-gnu* x86_64-*-linux* } } */
>>   /* { dg-options "-O2" } */
>> +/* For dynamic object sizes we 'succeed' if the returned size is known for
>> +   maximum object size.  */
>>   
>>   typedef __SIZE_TYPE__ size_t;
>>   extern void abort (void);
>> @@ -13,7 +15,11 @@ test1 (size_t x)
>>   
>>     for (i = 0; i < x; ++i)
>>       p = p + 4;
>> +#ifdef __builtin_object_size
>> +  if (__builtin_object_size (p, 0) == -1)
>> +#else
>>     if (__builtin_object_size (p, 0) != sizeof (buf) - 8)
>> +#endif
>>       abort ();
>>   }
>>   
>> @@ -25,10 +31,15 @@ test2 (size_t x)
>>   
>>     for (i = 0; i < x; ++i)
>>       p = p + 4;
>> +#ifdef __builtin_object_size
>> +  if (__builtin_object_size (p, 1) == -1)
>> +#else
>>     if (__builtin_object_size (p, 1) != sizeof (buf) - 8)
>> +#endif
>>       abort ();
>>   }
> 
> I'd say for __bdos it would be better to rewrite the testcase
> as dg-do run, perhaps use somewhat smaller buffer (say 16 times smaller;
> and dg-additional-sources for a file that actually defines that buffer
> and main.  Perhaps you can have those
> #ifdef __builtin_object_size
>    if (__builtin_object_size (p, 0) != sizeof (buf) - 8 - 4 * x)
> #else
> in there, just in the wrapper that #define __builtin_object_size
> make it dg-do run and have dg-additional-sources (and
> #ifndef N
> #define N 0x40000000
> #endif
> and use that as size of buf.

Got it, I'll do that.

>> +	gcc_checking_assert (is_gimple_variable (ret)
> 
> This should be TREE_CODE (ret) == SSA_NAME
> The reason why is_gimple_variable accepts VAR_DECLs/PARM_DECLs/RESULT_DECLs
> is high vs. low gimple, but size_type_node sizes are gimple types and
> both objsz passes are run when in ssa form, so it should always be either
> a SSA_NAME or INTEGER_CST.

OK.

> 
>> +			     || TREE_CODE (ret) == INTEGER_CST);
>> +    }
>> +
>> +  return ret;
>>   }
>>   
>>   /* Set size for VARNO corresponding to OSI to VAL.  */
>> @@ -176,27 +218,113 @@ object_sizes_initialize (struct object_size_info *osi, unsigned varno,
>>     object_sizes[object_size_type][varno].wholesize = wholeval;
>>   }
>>   
>> +/* Return a MODIFY_EXPR for cases where SSA and EXPR have the same type.  The
>> +   TREE_VEC is returned only in case of PHI nodes.  */
>> +
>> +static tree
>> +bundle_sizes (tree ssa, tree expr)
>> +{
>> +  gcc_checking_assert (TREE_TYPE (ssa) == sizetype);
>> +
>> +  if (!TREE_TYPE (expr))
>> +    {
>> +      gcc_checking_assert (TREE_CODE (expr) == TREE_VEC);
> 
> I think I'd prefer to do it the other way, condition on TREE_CODE (expr) == TREE_VEC
> and if needed assert it has NULL TREE_TYPE.

OK.

> 
>> +      TREE_VEC_ELT (expr, TREE_VEC_LENGTH (expr) - 1) = ssa;
>> +      return expr;
>> +    }
>> +
>> +  gcc_checking_assert (types_compatible_p (TREE_TYPE (expr), sizetype));
>> +  return size_binop (MODIFY_EXPR, ssa, expr);
> 
> This looks wrong.  MODIFY_EXPR isn't a binary expression
> (tcc_binary/tcc_comparison), size_binop shouldn't be called on it.
> I think you even don't want to fold it, so
>    return build2 (MODIFY_EXPR, sizetype, ssa, expr);
> ?

Got it, I'll fix that.

> Also, calling a parameter or var ssa is quite unusual, normally
> one calls a SSA_NAME either name, or ssa_name etc.

OK.

>> +	  gcc_checking_assert (size_initval_p (oldval, object_size_type));
>> +	  gcc_checking_assert (size_initval_p (old_wholeval,
>> +					       object_size_type));
>> +	  /* For dynamic object sizes, all object sizes that are not gimple
>> +	     variables will need to be gimplified.  */
>> +	  if (TREE_CODE (wholeval) != INTEGER_CST
>> +	      && !is_gimple_variable (wholeval))
>> +	    {
>> +	      bitmap_set_bit (osi->reexamine, varno);
>> +	      wholeval = bundle_sizes (make_ssa_name (sizetype), wholeval);
>> +	    }
>> +	  if (TREE_CODE (val) != INTEGER_CST && !is_gimple_variable (val))
> 
> Again twice above.

OK.

>> +/* Set temporary SSA names for object size and whole size to resolve dependency
>> +   loops in dynamic size computation.  */
>> +
>> +static inline void
>> +object_sizes_set_temp (struct object_size_info *osi, unsigned varno)
>> +{
>> +  tree val = object_sizes_get (osi, varno);
>> +
>> +  if (size_initval_p (val, osi->object_size_type))
>> +    object_sizes_set (osi, varno,
>> +		      make_ssa_name (sizetype),
>> +		      make_ssa_name (sizetype));
> 
> This makes me a little bit worried.  Do you compute the wholesize SSA_NAME
> at runtime always, or only when it is really needed and known not to always
> be equal to the size?
> I mean, e.g. for the cases where there is just const char *p = malloc (size);
> and the pointer is never increased size == wholesize.  For __bos it will
> just be 2 different INTEGER_CSTs, but if it would at runtime mean we compute
> something twice and hope we eventually find out during later passes
> it is the same, it would be bad.

I'm emitting both size and wholesize all the time; wholesize only really 
gets used in size_for_offset and otherwise should get DCE'd.  Right now 
for __bos (and constant sizes) wholesize is unused if it is the same as 
size.

For GIMPLE_CALL, GIMPLE_NOP, etc. I return the same tree for size and 
wholesize; maybe a trivial pointer comparison (sz != wholesize) ought to 
get rid of most of the uses in size_for_offset.
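
I.e. something like this in size_for_offset (sketch only):

  /* Skip the whole-size adjustment when size and wholesize are trivially
     the same tree.  */
  if (wholesize && wholesize != sz
      && (TREE_CODE (sz) != INTEGER_CST
          || TREE_CODE (wholesize) != INTEGER_CST
          || tree_int_cst_compare (sz, wholesize)))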

>> +  tree phires = TREE_VEC_ELT (wholesize, TREE_VEC_LENGTH (wholesize) - 1);
>> +  gphi *wholephi = create_phi_node (phires, gimple_bb (stmt));
>> +  phires = TREE_VEC_ELT (size, TREE_VEC_LENGTH (size) - 1);
>> +  gphi *phi = create_phi_node (phires, gimple_bb (stmt));
>> +  gphi *obj_phi =  as_a <gphi *> (stmt);
> 
> Just one space instead of 2 above.
> And the above shows that you actually create 2 PHIs unconditionally,
> rather than trying to do that only if 1) wholesize is ever actually
> different from size 2) something needs wholesize.

Hmm, so I only really need wholesize in ADDR_EXPR and POINTER_PLUS 
expressions.  I suppose I could improve this bit too and reduce 
wholesize computations.

>> +      /* If we built an expression, we will need to build statements
>> +	 and insert them on the edge right away.  */
>> +      if (!is_gimple_variable (wsz))
> 
> Again, above comments about is_gimple_variable.
> 
>> +	wsz = force_gimple_operand (wsz, &seq, true, NULL);
>> +      if (!is_gimple_variable (sz))
>> +	{
>> +	  gimple_seq s;
>> +	  sz = force_gimple_operand (sz, &s, true, NULL);
>> +	  gimple_seq_add_seq (&seq, s);
>> +	}
>> +
>> +      if (seq)
>> +	{
>> +	  edge e = gimple_phi_arg_edge (obj_phi, i);
>> +
>> +	  /* Put the size definition before the last statement of the source
>> +	     block of the PHI edge.  This ensures that any branches at the end
>> +	     of the source block remain the last statement.  We are OK even if
>> +	     the last statement is the definition of the object since it will
>> +	     succeed any definitions that contribute to its size and the size
>> +	     expression will succeed them too.  */
>> +	  gimple_stmt_iterator gsi = gsi_last_bb (e->src);
>> +	  gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
> 
> This looks problematic.  The last stmt in the bb might not exist at all,

Wouldn't the bb minimally have to contain the definition of the object 
whose size we computed?  e.g. for PHI [a(2), b(3)], wouldn't bb 2 at 
least have a statement with the definition of a?

Or wait, there could be situations where the definition is in a 
different block, e.g. bb 1, which has a single edge going on to bb 2?

> or can be one that needs to terminate the bb (stmt_ends_bb_p), or can be
> some other normal stmt that is just last in the bb, or it can be a debug
> stmt.  E.g. for -fcompare-debug sanity, inserting before the last stmt
> in the block is wrong, because without -g it could be some random stmt and
> with -g it can be a debug stmt, so the resulting stmts will then differ
> between -g and -g0.  Or the last stmt could actually be computing something
> you use in the expressions?
> I think generally you want gsi_insert_seq_on_edge, just be prepared that it
> doesn't always work - one can't insert on EH or ABNORMAL edges.
> For EH/ABNORMAL edges not really sure what can be done, punt, force just
> __bos behavior for it, or perhaps insert before the last stmt in that case,
> but beware that it would need to be SSA_NAME_OCCURS_IN_ABNORMAL_PHI
> SSA_NAME which I think needs to have underlying SSA_NAME_VAR and needs to
> follow rules such that out of SSA can handle it.

I suppose __bos-like behaviour could be a good compromise, i.e. insert a 
MAX_EXPR (or MIN_EXPR) if we can't find a suitable location to insert on 
edge.

Siddhesh
  
Jakub Jelinek Dec. 15, 2021, 6:52 p.m. UTC | #3
On Wed, Dec 15, 2021 at 11:26:48PM +0530, Siddhesh Poyarekar wrote:
> > This makes me a little bit worried.  Do you compute the wholesize SSA_NAME
> > at runtime always, or only when it is really needed and known not to always
> > be equal to the size?
> > I mean, e.g. for the cases where there is just const char *p = malloc (size);
> > and the pointer is never increased size == wholesize.  For __bos it will
> > just be 2 different INTEGER_CSTs, but if it would at runtime mean we compute
> > something twice and hope we eventually find out during later passes
> > it is the same, it would be bad.
> 
> I'm emitting both size and wholesize all the time; wholesize only really
> gets used in size_for_offset and otherwise should get DCE'd.  Right now for
> __bos (and constant sizes) wholesize is unused if it is the same as size.
> 
> FOR GIMPLE_CALL, GIMPLE_NOP, etc. I return the same tree for size and
> wholesize; maybe a trivial pointer comparison (sz != wholesize) ought to get
> rid of most of the uses in size_for_offset.

Perhaps DCE can handle well the case when you compute something (wholesize)
that isn't really needed, and VN/CSE the case where size and wholesize are
equal.  I think it
would be worth looking at a few testcases.

> > > +	{
> > > +	  edge e = gimple_phi_arg_edge (obj_phi, i);
> > > +
> > > +	  /* Put the size definition before the last statement of the source
> > > +	     block of the PHI edge.  This ensures that any branches at the end
> > > +	     of the source block remain the last statement.  We are OK even if
> > > +	     the last statement is the definition of the object since it will
> > > +	     succeed any definitions that contribute to its size and the size
> > > +	     expression will succeed them too.  */
> > > +	  gimple_stmt_iterator gsi = gsi_last_bb (e->src);
> > > +	  gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
> > 
> > This looks problematic.  The last stmt in the bb might not exist at all,
> 
> Wouldn't the bb minimally have to contain the definition of the object whose
> size we computed?  e.g. for PHI [a(2), b(3)], wouldn't bb 2 at least have a
> statement with the definition of a?

It can e.g. contain just a PHI.

> Or wait, there could be situations where the definition is in a different
> block, e.g. bb 1, which has a single edge going on to bb 2?

> I suppose __bos-like behaviour could be a good compromise, i.e. insert a
> MAX_EXPR (or MIN_EXPR) if we can't find a suitable location to insert on
> edge.

MAX_EXPR or MIN_EXPR?  I'd have expected the __bos constant in there.
But I must say I'm right now unsure what kind of PHIs one can have on bbs
reachable from both ab/eh edges and normal edges if we can have such bbs at
all.  I guess looking at some sigjmp/longjmp or non-local or computed goto
testcases might show something, perhaps I'll have a look tomorrow.
I'm sure we can have vop PHI.

	Jakub
  

Patch

diff --git a/gcc/builtins.c b/gcc/builtins.c
index 3b815a6e42a..52b62f32b44 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -10284,7 +10284,8 @@  fold_builtin_object_size (tree ptr, tree ost, enum built_in_function fcode)
   if (TREE_CODE (ptr) == ADDR_EXPR)
     {
       compute_builtin_object_size (ptr, object_size_type, &bytes);
-      if (int_fits_type_p (bytes, size_type_node))
+      if ((object_size_type & OST_DYNAMIC)
+	  || int_fits_type_p (bytes, size_type_node))
 	return fold_convert (size_type_node, bytes);
     }
   else if (TREE_CODE (ptr) == SSA_NAME)
@@ -10293,7 +10294,8 @@  fold_builtin_object_size (tree ptr, tree ost, enum built_in_function fcode)
        later.  Maybe subsequent passes will help determining
        it.  */
       if (compute_builtin_object_size (ptr, object_size_type, &bytes)
-	  && int_fits_type_p (bytes, size_type_node))
+	  && ((object_size_type & OST_DYNAMIC)
+	      || int_fits_type_p (bytes, size_type_node)))
 	return fold_convert (size_type_node, bytes);
     }
 
diff --git a/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c
new file mode 100644
index 00000000000..ddedf6a49bd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-0.c
@@ -0,0 +1,72 @@ 
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+typedef __SIZE_TYPE__ size_t;
+#define abort __builtin_abort
+
+size_t
+__attribute__ ((noinline))
+test_builtin_malloc_condphi (int cond)
+{
+  void *ret;
+ 
+  if (cond)
+    ret = __builtin_malloc (32);
+  else
+    ret = __builtin_malloc (64);
+
+  return __builtin_dynamic_object_size (ret, 0);
+}
+
+size_t
+__attribute__ ((noinline))
+test_builtin_calloc_condphi (size_t cnt, size_t sz, int cond)
+{
+  struct
+    {
+      int a;
+      char b;
+    } bin[cnt];
+
+  char *ch = __builtin_calloc (cnt, sz);
+
+  return __builtin_dynamic_object_size (cond ? ch : (void *) &bin, 0);
+}
+
+size_t
+__attribute__ ((noinline))
+test_deploop (size_t sz, size_t cond)
+{
+  char *bin = __builtin_alloca (32);
+
+  for (size_t i = 0; i < sz; i++)
+    if (i == cond)
+      bin = __builtin_alloca (64);
+
+  return __builtin_dynamic_object_size (bin, 0);
+}
+
+unsigned nfails = 0;
+
+#define FAIL() ({ \
+  __builtin_printf ("Failure at line: %d\n", __LINE__);			      \
+  nfails++;								      \
+})
+
+int
+main (int argc, char **argv)
+{
+  if (test_builtin_malloc_condphi (1) != 32)
+    FAIL ();
+  if (test_builtin_malloc_condphi (0) != 64)
+    FAIL ();
+  if (test_builtin_calloc_condphi (128, 1, 0) == 128)
+    FAIL ();
+  if (test_deploop (128, 129) != 32)
+    FAIL ();
+
+  if (nfails > 0)
+    __builtin_abort ();
+
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-10.c b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-10.c
index bc880a589ae..3a2d9821a44 100644
--- a/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-10.c
+++ b/gcc/testsuite/gcc.dg/builtin-dynamic-object-size-10.c
@@ -5,5 +5,7 @@ 
 #define __builtin_object_size __builtin_dynamic_object_size
 #include "builtin-object-size-10.c"
 
+/* early_objsz should resolve __builtin_dynamic_object_size like
+   __builtin_object_size.  */
 /* { dg-final { scan-tree-dump "maximum object size 21" "early_objsz" } } */
 /* { dg-final { scan-tree-dump "maximum subobject size 16" "early_objsz" } } */
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-1.c b/gcc/testsuite/gcc.dg/builtin-object-size-1.c
index 0154f4e9695..265c87ed6fb 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-1.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-1.c
@@ -42,9 +42,17 @@  test1 (void *q, int x)
     abort ();
   if (__builtin_object_size (q, 0) != (size_t) -1)
     abort ();
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 0)
+      != (x < 0
+	  ? sizeof (a) - __builtin_offsetof (struct A, a) - 9
+	  : sizeof (a) - __builtin_offsetof (struct A, c) - 1))
+    abort ();
+#else
   if (__builtin_object_size (r, 0)
       != sizeof (a) - __builtin_offsetof (struct A, a) - 9)
     abort ();
+#endif
   if (x < 6)
     r = &w[2].a[1];
   else
@@ -58,9 +66,17 @@  test1 (void *q, int x)
   if (__builtin_object_size (&y.b, 0)
       != sizeof (a) - __builtin_offsetof (struct A, b))
     abort ();
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 0)
+      != (x < 6
+	  ? 2 * sizeof (w[0]) - __builtin_offsetof (struct A, a) - 1
+	  : sizeof (a) - __builtin_offsetof (struct A, a) - 6))
+    abort ();
+#else
   if (__builtin_object_size (r, 0)
       != 2 * sizeof (w[0]) - __builtin_offsetof (struct A, a) - 1)
     abort ();
+#endif
   if (x < 20)
     r = malloc (30);
   else
@@ -165,6 +181,7 @@  test2 (void)
   struct B { char buf1[10]; char buf2[10]; } a;
   char *r, buf3[20];
   int i;
+  size_t res;
 
   if (sizeof (a) != 20)
     return;
@@ -181,7 +198,24 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[9];
     }
-  if (__builtin_object_size (r, 0) != 20)
+#ifdef __builtin_object_size
+  res = sizeof (buf3);
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 1;
+      else if (i == l1)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf2) - 7;
+      else if (i == l1 + 1)
+        res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 9;
+    }
+#else
+  res = 20;
+#endif
+  if (__builtin_object_size (r, 0) != res)
     abort ();
   r = &buf3[20];
   for (i = 0; i < 4; ++i)
@@ -195,13 +229,45 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[9];
     }
-  if (__builtin_object_size (r, 0) != 15)
+#ifdef __builtin_object_size
+  res = sizeof (buf3) - 20;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 7;
+      else if (i == l1)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf2) - 7;
+      else if (i == l1 + 1)
+        res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 9;
+    }
+  if (__builtin_object_size (r, 0) != res)
+    abort ();
+#else
+  res = 15;
+#endif
+  if (__builtin_object_size (r, 0) != res)
     abort ();
   r += 8;
+#ifdef __builtin_object_size
+  res -= 8;
+  if (__builtin_object_size (r, 0) != res)
+    abort ();
+  if (res >= 6)
+    {
+      if (__builtin_object_size (r + 6, 0) != res - 6)
+        abort ();
+    }
+  else if (__builtin_object_size (r + 6, 0) != 0)
+    abort ();
+#else
   if (__builtin_object_size (r, 0) != 7)
     abort ();
   if (__builtin_object_size (r + 6, 0) != 1)
     abort ();
+#endif
   r = &buf3[18];
   for (i = 0; i < 4; ++i)
     {
@@ -214,8 +280,31 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[4];
     }
+#ifdef __builtin_object_size
+  res = sizeof (buf3) - 18;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+          res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 9;
+      else if (i == l1)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf2) - 9;
+      else if (i == l1 + 1)
+        res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+        res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 4;
+    }
+  if (res >= 12)
+    {
+      if (__builtin_object_size (r + 12, 0) != res - 12)
+        abort ();
+    }
+  else if (__builtin_object_size (r + 12, 0) != 0)
+    abort ();
+#else
   if (__builtin_object_size (r + 12, 0) != 4)
     abort ();
+#endif
 }
 
 void
@@ -358,6 +447,10 @@  test5 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 0) != sizeof (buf) - 8 - 4 * x)
+    abort ();
+#else
   /* My understanding of ISO C99 6.5.6 is that a conforming
      program will not end up with p equal to &buf[0]
      through &buf[7], i.e. calling this function with say
@@ -367,6 +460,7 @@  test5 (size_t x)
      it would be 64 (or conservative (size_t) -1 == unknown).  */
   if (__builtin_object_size (p, 0) != sizeof (buf) - 8)
     abort ();
+#endif
   memset (p, ' ', sizeof (buf) - 8 - 4 * 4);
 }
 
@@ -380,8 +474,13 @@  test6 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 0) != sizeof (t) - 8 - 4 * x)
+    abort ();
+#else
   if (__builtin_object_size (p, 0) != sizeof (t) - 8)
     abort ();
+#endif
   memset (p, ' ', sizeof (t) - 8 - 4 * 4);
 }
 
@@ -436,21 +535,37 @@  test9 (unsigned cond)
   else
     p = &buf2[4];
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (&p[-4], 0) != (cond ? 6 : 10))
+    abort ();
+#else
   if (__builtin_object_size (&p[-4], 0) != 10)
     abort ();
+#endif
 
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 0) != ((cond ? 2 : 6) + cond))
+    abort ();
+#else
   if (__builtin_object_size (p, 0) != 10)
     abort ();
+#endif
 
   p = &y.c[8];
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 0)
+      != sizeof (y) - __builtin_offsetof (struct A, c) - 8 + cond)
+    abort ();
+#else
   if (__builtin_object_size (p, 0) != sizeof (y))
     abort ();
+#endif
 }
 
 int
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-2.c b/gcc/testsuite/gcc.dg/builtin-object-size-2.c
index 5cf29291aff..5051fea47c3 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-2.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-2.c
@@ -43,8 +43,15 @@  test1 (void *q, int x)
     abort ();
   if (__builtin_object_size (q, 1) != (size_t) -1)
     abort ();
+#ifdef __builtin_object_size
+  if (x < 0
+      ? __builtin_object_size (r, 1) != sizeof (a.a) - 9
+      : __builtin_object_size (r, 1) != sizeof (a.c) - 1)
+    abort ();
+#else
   if (__builtin_object_size (r, 1) != sizeof (a.c) - 1)
     abort ();
+#endif
   if (x < 6)
     r = &w[2].a[1];
   else
@@ -55,8 +62,15 @@  test1 (void *q, int x)
     abort ();
   if (__builtin_object_size (&y.b, 1) != sizeof (a.b))
     abort ();
+#ifdef __builtin_object_size
+  if (x < 6
+      ? __builtin_object_size (r, 1) != sizeof (a.a) - 1
+      : __builtin_object_size (r, 1) != sizeof (a.a) - 6)
+    abort ();
+#else
   if (__builtin_object_size (r, 1) != sizeof (a.a) - 1)
     abort ();
+#endif
   if (x < 20)
     r = malloc (30);
   else
@@ -185,6 +199,9 @@  test2 (void)
   struct B { char buf1[10]; char buf2[10]; } a;
   char *r, buf3[20];
   int i;
+#ifdef __builtin_object_size
+  size_t dyn_res;
+#endif
 
   if (sizeof (a) != 20)
     return;
@@ -201,8 +218,26 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[9];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3);
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+	dyn_res = sizeof (a.buf1) - 1;
+      else if (i == l1)
+	dyn_res = sizeof (a.buf2) - 7;
+      else if (i == l1 + 1)
+	dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+	dyn_res = sizeof (a.buf1) - 9;
+    }
+  if (__builtin_object_size (r, 1) != dyn_res)
+    abort ();
+#else
   if (__builtin_object_size (r, 1) != sizeof (buf3))
     abort ();
+#endif
   r = &buf3[20];
   for (i = 0; i < 4; ++i)
     {
@@ -215,13 +250,50 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[9];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3) - 20;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+        dyn_res = sizeof (a.buf1) - 7;
+      else if (i == l1)
+        dyn_res = sizeof (a.buf2) - 7;
+      else if (i == l1 + 1)
+        dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+        dyn_res = sizeof (a.buf1) - 9;
+    }
+  if (__builtin_object_size (r, 1) != dyn_res)
+    abort ();
+#else
   if (__builtin_object_size (r, 1) != sizeof (buf3) - 5)
     abort ();
+#endif
   r += 8;
+#ifdef __builtin_object_size
+  if (dyn_res >= 8)
+    {
+      dyn_res -= 8;
+      if (__builtin_object_size (r, 1) != dyn_res)
+	abort ();
+
+      if (dyn_res >= 6)
+	{
+	  if (__builtin_object_size (r + 6, 1) != dyn_res - 6)
+	    abort ();
+	}
+      else if (__builtin_object_size (r + 6, 1) != 0)
+	abort ();
+    }
+  else if (__builtin_object_size (r, 1) != 0)
+    abort ();
+#else
   if (__builtin_object_size (r, 1) != sizeof (buf3) - 13)
     abort ();
   if (__builtin_object_size (r + 6, 1) != sizeof (buf3) - 19)
     abort ();
+#endif
 }
 
 void
@@ -339,8 +411,13 @@  test5 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 1) != sizeof (t.buf) - 8 - 4 * x)
+    abort ();
+#else
   if (__builtin_object_size (p, 1) != sizeof (t.buf) - 8)
     abort ();
+#endif
   memset (p, ' ', sizeof (t.buf) - 8 - 4 * 4);
 }
 
@@ -394,21 +471,36 @@  test8 (unsigned cond)
   else
     p = &buf2[4];
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (&p[-4], 1) != (cond ? 6 : 10))
+    abort ();
+#else
   if (__builtin_object_size (&p[-4], 1) != 10)
     abort ();
+#endif
 
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 1) != ((cond ? 2 : 6) + cond))
+    abort ();
+#else
   if (__builtin_object_size (p, 1) != 10)
     abort ();
+#endif
 
   p = &y.c[8];
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 1) != sizeof (y.c) - 8 + cond)
+    abort ();
+#else
   if (__builtin_object_size (p, 1) != sizeof (y.c))
     abort ();
+#endif
 }
 
 int
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-3.c b/gcc/testsuite/gcc.dg/builtin-object-size-3.c
index 3a692c4e3d2..1d92627266b 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-3.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-3.c
@@ -71,23 +71,45 @@  test1 (void *q, int x)
     r = malloc (30);
   else
     r = calloc (2, 14);
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 2) != (x < 20 ? 30 : 2 * 14))
+    abort ();
+#else
   if (__builtin_object_size (r, 2) != 2 * 14)
     abort ();
+#endif
   if (x < 30)
     r = malloc (sizeof (a));
   else
     r = &a.a[3];
+#ifdef __builtin_object_size
+  size_t objsz = (x < 30 ? sizeof (a)
+                  : sizeof (a) - __builtin_offsetof (struct A, a) - 3);
+  if (__builtin_object_size (r, 2) != objsz)
+    abort ();
+#else
   if (__builtin_object_size (r, 2)
       != sizeof (a) - __builtin_offsetof (struct A, a) - 3)
     abort ();
+#endif
   r = memcpy (r, "a", 2);
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 2) != objsz)
+    abort ();
+#else
   if (__builtin_object_size (r, 2)
       != sizeof (a) - __builtin_offsetof (struct A, a) - 3)
     abort ();
+#endif
   r = memcpy (r + 2, "b", 2) + 2;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 2) != objsz - 4)
+    abort ();
+#else
   if (__builtin_object_size (r, 2)
       != sizeof (a) - __builtin_offsetof (struct A, a) - 3 - 4)
     abort ();
+#endif
   r = &a.a[4];
   r = memset (r, 'a', 2);
   if (__builtin_object_size (r, 2)
@@ -164,6 +186,9 @@  test2 (void)
   struct B { char buf1[10]; char buf2[10]; } a;
   char *r, buf3[20];
   int i;
+#ifdef __builtin_object_size
+  size_t dyn_res;
+#endif
 
   if (sizeof (a) != 20)
     return;
@@ -180,8 +205,26 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[9];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3);
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 1;
+      else if (i == l1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf2) - 7;
+      else if (i == l1 + 1)
+	dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 9;
+    }
+  if (__builtin_object_size (r, 2) != dyn_res)
+    abort ();
+#else
   if (__builtin_object_size (r, 2) != 3)
     abort ();
+#endif
   r = &buf3[20];
   for (i = 0; i < 4; ++i)
     {
@@ -208,13 +251,44 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[4];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3) - 2;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 1;
+      else if (i == l1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 2;
+      else if (i == l1 + 1)
+	dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 4;
+    }
+  if (__builtin_object_size (r, 2) != dyn_res)
+    abort ();
+#else
   if (__builtin_object_size (r, 2) != 15)
     abort ();
+#endif
   r += 8;
+#ifdef __builtin_object_size
+  dyn_res -= 8;
+  if (__builtin_object_size (r, 2) != dyn_res)
+    abort ();
+  if (dyn_res >= 6)
+    {
+      if (__builtin_object_size (r + 6, 2) != dyn_res - 6)
+	abort ();
+    }
+  else if (__builtin_object_size (r + 6, 2) != 0)
+    abort ();
+#else
   if (__builtin_object_size (r, 2) != 7)
     abort ();
   if (__builtin_object_size (r + 6, 2) != 1)
     abort ();
+#endif
   r = &buf3[18];
   for (i = 0; i < 4; ++i)
     {
@@ -227,8 +301,31 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[4];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3) - 18;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 9;
+      else if (i == l1)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf2) - 9;
+      else if (i == l1 + 1)
+	dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+	dyn_res = sizeof (a) - __builtin_offsetof (struct B, buf1) - 4;
+    }
+  if (dyn_res >= 12)
+    {
+      if (__builtin_object_size (r + 12, 2) != dyn_res - 12)
+	abort ();
+    }
+  else if (__builtin_object_size (r + 12, 2) != 0)
+    abort ();
+#else
   if (__builtin_object_size (r + 12, 2) != 0)
     abort ();
+#endif
 }
 
 void
@@ -371,7 +468,11 @@  test5 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 2) != sizeof (buf) - 8 - 4 * x)
+#else
   if (__builtin_object_size (p, 2) != 0)
+#endif
     abort ();
   memset (p, ' ', sizeof (buf) - 8 - 4 * 4);
 }
@@ -386,7 +487,11 @@  test6 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 2) != sizeof (t) - 8 - 4 * x)
+#else
   if (__builtin_object_size (p, 2) != 0)
+#endif
     abort ();
   memset (p, ' ', sizeof (t) - 8 - 4 * 4);
 }
@@ -442,22 +547,38 @@  test9 (unsigned cond)
   else
     p = &buf2[4];
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (&p[-4], 2) != (cond ? 6 : 10))
+    abort ();
+#else
   if (__builtin_object_size (&p[-4], 2) != 6)
     abort ();
+#endif
 
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 2) != ((cond ? 2 : 6) + cond))
+    abort ();
+#else
   if (__builtin_object_size (p, 2) != 2)
     abort ();
+#endif
 
   p = &y.c[8];
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 2)
+      != sizeof (y) - __builtin_offsetof (struct A, c) - 8 + cond)
+    abort ();
+#else
   if (__builtin_object_size (p, 2)
       != sizeof (y) - __builtin_offsetof (struct A, c) - 8)
     abort ();
+#endif
 }
 
 int
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-4.c b/gcc/testsuite/gcc.dg/builtin-object-size-4.c
index 87381620cc9..9da3537a5f7 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-4.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-4.c
@@ -43,7 +43,12 @@  test1 (void *q, int x)
     abort ();
   if (__builtin_object_size (q, 3) != 0)
     abort ();
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3)
+      != (x < 0 ? sizeof (a.a) - 9 : sizeof (a.c) - 1))
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.a) - 9)
+#endif
     abort ();
   if (x < 6)
     r = &w[2].a[1];
@@ -55,31 +60,57 @@  test1 (void *q, int x)
     abort ();
   if (__builtin_object_size (&y.b, 3) != sizeof (a.b))
     abort ();
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3)
+      != (x < 6 ? sizeof (w[2].a) - 1 : sizeof (a.a) - 6))
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.a) - 6)
+#endif
     abort ();
   if (x < 20)
     r = malloc (30);
   else
     r = calloc (2, 16);
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3) != (x < 20 ? 30 : 2 * 16))
+#else
   if (__builtin_object_size (r, 3) != 30)
+#endif
     abort ();
   if (x < 20)
     r = malloc (30);
   else
     r = calloc (2, 14);
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3) != (x < 20 ? 30 : 2 * 14))
+#else
   if (__builtin_object_size (r, 3) != 2 * 14)
+#endif
     abort ();
   if (x < 30)
     r = malloc (sizeof (a));
   else
     r = &a.a[3];
+#ifdef __builtin_object_size
+  size_t objsz = x < 30 ? sizeof (a) : sizeof (a.a) - 3;
+  if (__builtin_object_size (r, 3) != objsz)
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.a) - 3)
+#endif
     abort ();
   r = memcpy (r, "a", 2);
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3) != objsz)
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.a) - 3)
+#endif
     abort ();
   r = memcpy (r + 2, "b", 2) + 2;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3) != objsz - 4)
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.a) - 3 - 4)
+#endif
     abort ();
   r = &a.a[4];
   r = memset (r, 'a', 2);
@@ -184,6 +215,9 @@  test2 (void)
   struct B { char buf1[10]; char buf2[10]; } a;
   char *r, buf3[20];
   int i;
+#ifdef __builtin_object_size
+  size_t dyn_res = 0;
+#endif
 
   if (sizeof (a) != 20)
     return;
@@ -228,13 +262,38 @@  test2 (void)
       else if (i == l1 + 2)
 	r = &a.buf1[2];
     }
+#ifdef __builtin_object_size
+  dyn_res = sizeof (buf3) - 1;
+
+  for (i = 0; i < 4; ++i)
+    {
+      if (i == l1 - 1)
+        dyn_res = sizeof (a.buf1) - 6;
+      else if (i == l1)
+        dyn_res = sizeof (a.buf2) - 4;
+      else if (i == l1 + 1)
+        dyn_res = sizeof (buf3) - 5;
+      else if (i == l1 + 2)
+        dyn_res = sizeof (a.buf1) - 2;
+    }
+  if (__builtin_object_size (r, 3) != dyn_res)
+    abort ();
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.buf1) - 6)
     abort ();
+#endif
   r += 2;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (r, 3) != dyn_res - 2)
+    abort ();
+  if (__builtin_object_size (r + 1, 3) != dyn_res - 3)
+    abort ();
+#else
   if (__builtin_object_size (r, 3) != sizeof (a.buf1) - 6 - 2)
     abort ();
   if (__builtin_object_size (r + 1, 3) != sizeof (a.buf1) - 6 - 3)
     abort ();
+#endif
 }
 
 void
@@ -352,7 +411,11 @@  test5 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 3) != sizeof (t.buf) - 8 - 4 * x)
+#else
   if (__builtin_object_size (p, 3) != 0)
+#endif
     abort ();
   memset (p, ' ', sizeof (t.buf) - 8 - 4 * 4);
 }
@@ -407,21 +470,36 @@  test8 (unsigned cond)
   else
     p = &buf2[4];
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (&p[-4], 3) != (cond ? 6 : 10))
+    abort ();
+#else
   if (__builtin_object_size (&p[-4], 3) != 6)
     abort ();
+#endif
 
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 3) != ((cond ? 2 : 6) + cond))
+    abort ();
+#else
   if (__builtin_object_size (p, 3) != 2)
     abort ();
+#endif
 
   p = &y.c[8];
   for (unsigned i = cond; i > 0; i--)
     p--;
 
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 3) != sizeof (y.c) - 8 + cond)
+    abort ();
+#else
   if (__builtin_object_size (p, 3) != sizeof (y.c) - 8)
     abort ();
+#endif
 }
 
 int
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-5.c b/gcc/testsuite/gcc.dg/builtin-object-size-5.c
index 8e63d9c7a5e..904e616949d 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-5.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-5.c
@@ -1,5 +1,7 @@ 
 /* { dg-do compile { target i?86-*-linux* i?86-*-gnu* x86_64-*-linux* } } */
 /* { dg-options "-O2" } */
+/* For dynamic object sizes we 'succeed' if the returned size is known for
+   maximum object size.  */
 
 typedef __SIZE_TYPE__ size_t;
 extern void abort (void);
@@ -13,7 +15,11 @@  test1 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 0) == -1)
+#else
   if (__builtin_object_size (p, 0) != sizeof (buf) - 8)
+#endif
     abort ();
 }
 
@@ -25,10 +31,15 @@  test2 (size_t x)
 
   for (i = 0; i < x; ++i)
     p = p + 4;
+#ifdef __builtin_object_size
+  if (__builtin_object_size (p, 1) == -1)
+#else
   if (__builtin_object_size (p, 1) != sizeof (buf) - 8)
+#endif
     abort ();
 }
 
+#ifndef __builtin_object_size
 void
 test3 (size_t x)
 {
@@ -52,6 +63,7 @@  test4 (size_t x)
   if (__builtin_object_size (p, 3) != 0)
     abort ();
 }
+#endif
 
 void
 test5 (void)
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index c4f3fbeb8f5..dd6c90efebf 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -35,13 +35,14 @@  along with GCC; see the file COPYING3.  If not see
 #include "stringpool.h"
 #include "attribs.h"
 #include "builtins.h"
+#include "gimplify-me.h"
 
 struct object_size_info
 {
   int object_size_type;
   unsigned char pass;
   bool changed;
-  bitmap visited, reexamine;
+  bitmap visited, reexamine, unknowns;
   unsigned int *depths;
   unsigned int *stack, *tos;
 };
@@ -74,7 +75,11 @@  static void check_for_plus_in_loops_1 (struct object_size_info *, tree,
    object_sizes[1] is upper bound for the object size and number of bytes till
    the end of the subobject (innermost array or field with address taken).
    object_sizes[2] is lower bound for the object size and number of bytes till
-   the end of the object and object_sizes[3] lower bound for subobject.  */
+   the end of the object and object_sizes[3] lower bound for subobject.
+
+   For static object sizes, the object size and the bytes till the end of the
+   object are both INTEGER_CST.  In the dynamic case, they are finally either a
+   gimple variable or an INTEGER_CST.  */
 static vec<object_size> object_sizes[OST_END];
 
 /* Bitmaps what object sizes have been computed already.  */
@@ -100,6 +105,15 @@  unknown (int object_size_type)
   return ~initval (object_size_type);
 }
 
+/* Return true if VAL represents an initial size for OBJECT_SIZE_TYPE.  */
+
+static inline bool
+size_initval_p (tree val, int object_size_type)
+{
+  return (tree_fits_uhwi_p (val)
+	  && tree_to_uhwi (val) == initval (object_size_type));
+}
+
 /* Return true if VAL is represents an unknown size for OBJECT_SIZE_TYPE.  */
 
 static inline bool
@@ -151,17 +165,45 @@  object_sizes_unknown_p (int object_size_type, unsigned varno)
 			 object_size_type);
 }
 
-/* Return size for VARNO corresponding to OSI.  If WHOLE is true, return the
-   whole object size.  */
+/* Return the raw size expression for VARNO corresponding to OSI.  This returns
+   the TREE_VEC as is and should only be used during gimplification.  */
+
+static inline object_size
+object_sizes_get_raw (struct object_size_info *osi, unsigned varno)
+{
+  gcc_assert (osi->pass != 0);
+  return object_sizes[osi->object_size_type][varno];
+}
+
+/* Return a size tree for VARNO corresponding to OSI.  If WHOLE is true, return
+   the whole object size.  Use this for building size expressions based on size
+   of VARNO.  */
 
 static inline tree
 object_sizes_get (struct object_size_info *osi, unsigned varno,
 		  bool whole = false)
 {
+  tree ret;
+
   if (whole)
-    return object_sizes[osi->object_size_type][varno].wholesize;
+    ret = object_sizes[osi->object_size_type][varno].wholesize;
   else
-    return object_sizes[osi->object_size_type][varno].size;
+    ret = object_sizes[osi->object_size_type][varno].size;
+
+  int object_size_type = osi->object_size_type;
+
+  if (object_size_type & OST_DYNAMIC)
+    {
+      if (TREE_CODE (ret) == MODIFY_EXPR)
+	return TREE_OPERAND (ret, 0);
+      else if (TREE_CODE (ret) == TREE_VEC)
+	return TREE_VEC_ELT (ret, TREE_VEC_LENGTH (ret) - 1);
+      else
+	gcc_checking_assert (is_gimple_variable (ret)
+			     || TREE_CODE (ret) == INTEGER_CST);
+    }
+
+  return ret;
 }
 
 /* Set size for VARNO corresponding to OSI to VAL.  */
@@ -176,27 +218,113 @@  object_sizes_initialize (struct object_size_info *osi, unsigned varno,
   object_sizes[object_size_type][varno].wholesize = wholeval;
 }
 
+/* Return a MODIFY_EXPR for cases where SSA and EXPR have the same type.  The
+   TREE_VEC is returned only in case of PHI nodes.  */
+
+static tree
+bundle_sizes (tree ssa, tree expr)
+{
+  gcc_checking_assert (TREE_TYPE (ssa) == sizetype);
+
+  if (!TREE_TYPE (expr))
+    {
+      gcc_checking_assert (TREE_CODE (expr) == TREE_VEC);
+      TREE_VEC_ELT (expr, TREE_VEC_LENGTH (expr) - 1) = ssa;
+      return expr;
+    }
+
+  gcc_checking_assert (types_compatible_p (TREE_TYPE (expr), sizetype));
+  return size_binop (MODIFY_EXPR, ssa, expr);
+}
+
 /* Set size for VARNO corresponding to OSI to VAL if it is the new minimum or
-   maximum.  */
+   maximum.  For static sizes, each element of TREE_VEC is always INTEGER_CST
+   throughout the computation.  For dynamic sizes, each element may either be a
+   gimple variable, a MODIFY_EXPR or a TREE_VEC.  The MODIFY_EXPR is for
+   expressions that need to be gimplified.  TREE_VECs are special, they're
+   emitted only for GIMPLE_PHI and the PHI result variable is the last element
+   of the vector.  */
 
-static inline bool
+static bool
 object_sizes_set (struct object_size_info *osi, unsigned varno, tree val,
 		  tree wholeval)
 {
   int object_size_type = osi->object_size_type;
   object_size osize = object_sizes[object_size_type][varno];
+  bool changed = true;
 
   tree oldval = osize.size;
   tree old_wholeval = osize.wholesize;
 
-  enum tree_code code = object_size_type & OST_MINIMUM ? MIN_EXPR : MAX_EXPR;
+  if (object_size_type & OST_DYNAMIC)
+    {
+      if (bitmap_bit_p (osi->reexamine, varno))
+	{
+	  if (size_unknown_p (val, object_size_type))
+	    {
+	      oldval = object_sizes_get (osi, varno);
+	      old_wholeval = object_sizes_get (osi, varno, true);
+	      bitmap_set_bit (osi->unknowns, SSA_NAME_VERSION (oldval));
+	      bitmap_set_bit (osi->unknowns, SSA_NAME_VERSION (old_wholeval));
+	      bitmap_clear_bit (osi->reexamine, varno);
+	    }
+	  else
+	    {
+	      val = bundle_sizes (oldval, val);
+	      wholeval = bundle_sizes (old_wholeval, wholeval);
+	    }
+	}
+      else
+	{
+	  gcc_checking_assert (size_initval_p (oldval, object_size_type));
+	  gcc_checking_assert (size_initval_p (old_wholeval,
+					       object_size_type));
+	  /* For dynamic object sizes, all object sizes that are not gimple
+	     variables will need to be gimplified.  */
+	  if (TREE_CODE (wholeval) != INTEGER_CST
+	      && !is_gimple_variable (wholeval))
+	    {
+	      bitmap_set_bit (osi->reexamine, varno);
+	      wholeval = bundle_sizes (make_ssa_name (sizetype), wholeval);
+	    }
+	  if (TREE_CODE (val) != INTEGER_CST && !is_gimple_variable (val))
+	    {
+	      bitmap_set_bit (osi->reexamine, varno);
+	      val = bundle_sizes (make_ssa_name (sizetype), val);
+	    }
+	  /* If the new value is a temporary variable, mark it for
+	     reexamination.  */
+	  else if (TREE_CODE (val) == SSA_NAME && !SSA_NAME_DEF_STMT (val))
+	    bitmap_set_bit (osi->reexamine, varno);
+	}
+    }
+  else
+    {
+      enum tree_code code = (object_size_type & OST_MINIMUM
+			     ? MIN_EXPR : MAX_EXPR);
 
-  val = size_binop (code, val, oldval);
-  wholeval = size_binop (code, wholeval, old_wholeval);
+      val = size_binop (code, val, oldval);
+      wholeval = size_binop (code, wholeval, old_wholeval);
+      changed = tree_int_cst_compare (val, oldval) != 0;
+    }
 
   object_sizes[object_size_type][varno].size = val;
   object_sizes[object_size_type][varno].wholesize = wholeval;
-  return tree_int_cst_compare (oldval, val) != 0;
+  return changed;
+}
+
+/* Set temporary SSA names for object size and whole size to resolve dependency
+   loops in dynamic size computation.  */
+
+static inline void
+object_sizes_set_temp (struct object_size_info *osi, unsigned varno)
+{
+  tree val = object_sizes_get (osi, varno);
+
+  if (size_initval_p (val, osi->object_size_type))
+    object_sizes_set (osi, varno,
+		      make_ssa_name (sizetype),
+		      make_ssa_name (sizetype));
 }
 
 /* Initialize OFFSET_LIMIT variable.  */
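
As an aside (a sketch, not part of the patch or its testsuite): the temporary
SSA names from object_sizes_set_temp come into play when an object's size
depends on itself through a loop back edge, e.g.:

__SIZE_TYPE__
__attribute__ ((noinline))
size_in_loop (unsigned n)
{
  char *p = __builtin_malloc (64);

  /* In GIMPLE, p becomes a PHI of the initial pointer and p + 2, so its size
     depends on itself through the back edge.  collect_object_sizes_for
     detects the dependency loop, object_sizes_set_temp installs placeholder
     SSA names, and gimplify_size_expressions later materializes the size PHI
     and arithmetic.  */
  for (unsigned i = 0; i < n; i++)
    p = p + 2;

  return __builtin_dynamic_object_size (p, 0);
}
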
@@ -218,14 +346,15 @@  static tree
 size_for_offset (tree sz, tree offset, tree wholesize = NULL_TREE)
 {
   gcc_checking_assert (TREE_CODE (offset) == INTEGER_CST);
-  gcc_checking_assert (TREE_CODE (sz) == INTEGER_CST);
   gcc_checking_assert (types_compatible_p (TREE_TYPE (sz), sizetype));
 
   /* For negative offsets, if we have a distinct WHOLESIZE, use it to get a net
      offset from the whole object.  */
-  if (wholesize && tree_int_cst_compare (sz, wholesize))
+  if (wholesize
+      && (TREE_CODE (sz) != INTEGER_CST
+	  || TREE_CODE (wholesize) != INTEGER_CST
+	  || tree_int_cst_compare (sz, wholesize)))
     {
-      gcc_checking_assert (TREE_CODE (wholesize) == INTEGER_CST);
       gcc_checking_assert (types_compatible_p (TREE_TYPE (wholesize),
 					       sizetype));
 
@@ -242,13 +371,16 @@  size_for_offset (tree sz, tree offset, tree wholesize = NULL_TREE)
   if (!types_compatible_p (TREE_TYPE (offset), sizetype))
     fold_convert (sizetype, offset);
 
-  if (integer_zerop (offset))
-    return sz;
+  if (TREE_CODE (offset) == INTEGER_CST)
+    {
+      if (integer_zerop (offset))
+	return sz;
 
-  /* Negative or too large offset even after adjustment, cannot be within
-     bounds of an object.  */
-  if (compare_tree_int (offset, offset_limit) > 0)
-    return size_zero_node;
+      /* Negative or too large offset even after adjustment, cannot be within
+	 bounds of an object.  */
+      if (compare_tree_int (offset, offset_limit) > 0)
+	return size_zero_node;
+    }
 
   return size_binop (MINUS_EXPR, size_binop (MAX_EXPR, sz, offset), offset);
 }
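
To see why size_for_offset can no longer assume constant sizes, consider this
sketch (not from the patch's tests; it assumes the earlier patches in this
series already return the allocation argument for __builtin_malloc):

__SIZE_TYPE__
__attribute__ ((noinline))
offset_into_dynamic_object (__SIZE_TYPE__ n)
{
  char *p = __builtin_malloc (n);
  char *q = p + 8;

  /* SZ for q is the SSA name holding n, not an INTEGER_CST, so
     size_for_offset builds roughly MAX (n, 8) - 8 instead of folding the
     subtraction to a constant.  */
  return __builtin_dynamic_object_size (q, 0);
}
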
@@ -685,6 +817,205 @@  pass_through_call (const gcall *call)
   return NULL_TREE;
 }
 
+/* Emit PHI nodes for the size and whole-size expressions SIZE and WHOLESIZE,
+   mirroring the PHI node STMT that defines the object.  */
+
+static void
+emit_phi_nodes (gimple *stmt, tree size, tree wholesize)
+{
+  tree phires = TREE_VEC_ELT (wholesize, TREE_VEC_LENGTH (wholesize) - 1);
+  gphi *wholephi = create_phi_node (phires, gimple_bb (stmt));
+  phires = TREE_VEC_ELT (size, TREE_VEC_LENGTH (size) - 1);
+  gphi *phi = create_phi_node (phires, gimple_bb (stmt));
+  gphi *obj_phi = as_a <gphi *> (stmt);
+
+  gcc_checking_assert (TREE_CODE (wholesize) == TREE_VEC);
+  gcc_checking_assert (TREE_CODE (size) == TREE_VEC);
+
+  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
+    {
+      gimple_seq seq = NULL;
+      tree wsz = TREE_VEC_ELT (wholesize, i);
+      tree sz = TREE_VEC_ELT (size, i);
+
+      /* If we built an expression, we will need to build statements
+	 and insert them on the edge right away.  */
+      if (!is_gimple_variable (wsz))
+	wsz = force_gimple_operand (wsz, &seq, true, NULL);
+      if (!is_gimple_variable (sz))
+	{
+	  gimple_seq s;
+	  sz = force_gimple_operand (sz, &s, true, NULL);
+	  gimple_seq_add_seq (&seq, s);
+	}
+
+      if (seq)
+	{
+	  edge e = gimple_phi_arg_edge (obj_phi, i);
+
+	  /* Put the size definition before the last statement of the source
+	     block of the PHI edge.  This ensures that any branches at the end
+	     of the source block remain the last statement.  We are OK even if
+	     the last statement is the definition of the object, since that
+	     definition necessarily comes after any definitions that contribute
+	     to its size, and the size expression will come after them too.  */
+	  gimple_stmt_iterator gsi = gsi_last_bb (e->src);
+	  gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
+	}
+
+      add_phi_arg (wholephi, wsz,
+		   gimple_phi_arg_edge (obj_phi, i),
+		   gimple_phi_arg_location (obj_phi, i));
+
+      add_phi_arg (phi, sz,
+		   gimple_phi_arg_edge (obj_phi, i),
+		   gimple_phi_arg_location (obj_phi, i));
+    }
+}
+
+/* Descend through EXPR and return size_unknown if it uses any SSA variable
+   generated by object_sizes_set or object_sizes_set_temp that turned out to
+   be size_unknown, as noted in UNKNOWNS.  */
+
+static tree
+propagate_unknowns (object_size_info *osi, tree expr)
+{
+  int object_size_type = osi->object_size_type;
+
+  switch (TREE_CODE (expr))
+    {
+    case SSA_NAME:
+      if (bitmap_bit_p (osi->unknowns, SSA_NAME_VERSION (expr)))
+	return size_unknown (object_size_type);
+      return expr;
+
+    case MIN_EXPR:
+    case MAX_EXPR:
+	{
+	  tree res = propagate_unknowns (osi, TREE_OPERAND (expr, 0));
+	  if (size_unknown_p (res, object_size_type))
+	    return res;
+
+	  res = propagate_unknowns (osi, TREE_OPERAND (expr, 1));
+	  if (size_unknown_p (res, object_size_type))
+	    return res;
+
+	  return expr;
+	}
+    case MODIFY_EXPR:
+	{
+	  tree res = propagate_unknowns (osi, TREE_OPERAND (expr, 1));
+	  if (size_unknown_p (res, object_size_type))
+	    return res;
+	  return expr;
+	}
+    case TREE_VEC:
+      for (int i = 0; i < TREE_VEC_LENGTH (expr); i++)
+	{
+	  tree res = propagate_unknowns (osi, TREE_VEC_ELT (expr, i));
+	  if (size_unknown_p (res, object_size_type))
+	    return res;
+	}
+      return expr;
+    case PLUS_EXPR:
+    case MINUS_EXPR:
+	{
+	  tree res = propagate_unknowns (osi, TREE_OPERAND (expr, 0));
+	  if (size_unknown_p (res, object_size_type))
+	    return res;
+
+	  return expr;
+	}
+    default:
+      return expr;
+    }
+}
+
+/* Walk through size expressions that need reexamination and generate
+   statements for them.  */
+
+static void
+gimplify_size_expressions (object_size_info *osi)
+{
+  int object_size_type = osi->object_size_type;
+  bitmap_iterator bi;
+  unsigned int i;
+  bool changed;
+
+  /* Step 1: Propagate unknowns into expressions.  */
+  bitmap reexamine = BITMAP_ALLOC (NULL);
+  bitmap_copy (reexamine, osi->reexamine);
+  do
+    {
+      changed = false;
+      EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
+	{
+	  object_size cur = object_sizes_get_raw (osi, i);
+
+	  if (size_unknown_p (propagate_unknowns (osi, cur.size),
+			      object_size_type)
+	      || size_unknown_p (propagate_unknowns (osi, cur.wholesize),
+				 object_size_type))
+	    {
+	      object_sizes_set (osi, i,
+				size_unknown (object_size_type),
+				size_unknown (object_size_type));
+	      changed = true;
+	    }
+	}
+      bitmap_copy (reexamine, osi->reexamine);
+    }
+  while (changed);
+
+  /* Release all unknowns.  */
+  EXECUTE_IF_SET_IN_BITMAP (osi->unknowns, 0, i, bi)
+    release_ssa_name (ssa_name (i));
+
+  /* Expand all size expressions to put their definitions close to the objects
+     for which the size is being computed.  */
+  EXECUTE_IF_SET_IN_BITMAP (osi->reexamine, 0, i, bi)
+    {
+      gimple_seq seq = NULL;
+      object_size osize = object_sizes_get_raw (osi, i);
+
+      gimple *stmt = SSA_NAME_DEF_STMT (ssa_name (i));
+      enum gimple_code code = gimple_code (stmt);
+
+      /* PHI nodes need special attention.  */
+      if (code == GIMPLE_PHI)
+	emit_phi_nodes (stmt, osize.size, osize.wholesize);
+      else
+	{
+	  tree size_expr = NULL_TREE;
+
+	  /* Bundle wholesize in with the size to gimplify if needed.  */
+	  if (!is_gimple_variable (osize.wholesize)
+	      && TREE_CODE (osize.wholesize) != INTEGER_CST)
+	    size_expr = size_binop (COMPOUND_EXPR,
+				    osize.wholesize,
+				    osize.size);
+	  else if (!is_gimple_variable (osize.size)
+		   && TREE_CODE (osize.size) != INTEGER_CST)
+	    size_expr = osize.size;
+
+	  if (size_expr)
+	    {
+	      gimple_stmt_iterator gsi;
+	      if (code == GIMPLE_NOP)
+		gsi = gsi_start_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+	      else
+		gsi = gsi_for_stmt (stmt);
+
+	      force_gimple_operand (size_expr, &seq, true, NULL);
+	      gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
+	    }
+	}
+
+      /* We're done, so replace the MODIFY_EXPRs with the SSA names.  */
+      object_sizes_initialize (osi, i,
+			       object_sizes_get (osi, i),
+			       object_sizes_get (osi, i, true));
+    }
+}
 
 /* Compute __builtin_object_size value for PTR and set *PSIZE to
    the resulting value.  If the declared object is known and PDECL
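
For a concrete picture of what emit_phi_nodes and gimplify_size_expressions
produce, here is a sketch (again assuming the earlier patches in this series
handle the __builtin_malloc (n) allocation; not part of the patch's testsuite):

__SIZE_TYPE__
__attribute__ ((noinline))
size_of_phi (int cond, __SIZE_TYPE__ n)
{
  char *p = __builtin_malloc (n);
  char *q;

  if (cond)
    q = p + 4;
  else
    q = p;

  /* q is defined by a PHI, so the pass creates matching size and whole-size
     PHIs in the same block, selecting n on one edge and roughly
     MAX (n, 4) - 4 on the other; any non-trivial size expressions are
     gimplified into real statements first.  */
  return __builtin_dynamic_object_size (q, 0);
}
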
@@ -763,9 +1094,15 @@  compute_builtin_object_size (tree ptr, int object_size_type,
 
       osi.visited = BITMAP_ALLOC (NULL);
       osi.reexamine = BITMAP_ALLOC (NULL);
-      osi.depths = NULL;
-      osi.stack = NULL;
-      osi.tos = NULL;
+
+      if (object_size_type & OST_DYNAMIC)
+	osi.unknowns = BITMAP_ALLOC (NULL);
+      else
+	{
+	  osi.depths = NULL;
+	  osi.stack = NULL;
+	  osi.tos = NULL;
+	}
 
       /* First pass: walk UD chains, compute object sizes that
 	 can be computed.  osi.reexamine bitmap at the end will
@@ -775,6 +1112,14 @@  compute_builtin_object_size (tree ptr, int object_size_type,
       osi.changed = false;
       collect_object_sizes_for (&osi, ptr);
 
+      if (object_size_type & OST_DYNAMIC)
+	{
+	  osi.pass = 1;
+	  gimplify_size_expressions (&osi);
+	  BITMAP_FREE (osi.unknowns);
+	  bitmap_clear (osi.reexamine);
+	}
+
       /* Second pass: keep recomputing object sizes of variables
 	 that need reexamination, until no object sizes are
 	 increased or all object sizes are computed.  */
@@ -1021,6 +1366,25 @@  plus_stmt_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   return reexamine;
 }
 
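+/* Compute the dynamic object size of VAR and return the result in *SIZE and
+   *WHOLESIZE.  */
+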
+static void
+dynamic_object_size (struct object_size_info *osi, tree var,
+		     tree *size, tree *wholesize)
+{
+  int object_size_type = osi->object_size_type;
+
+  if (TREE_CODE (var) == SSA_NAME)
+    {
+      unsigned varno = SSA_NAME_VERSION (var);
+
+      collect_object_sizes_for (osi, var);
+      *size = object_sizes_get (osi, varno);
+      *wholesize = object_sizes_get (osi, varno, true);
+    }
+  else if (TREE_CODE (var) == ADDR_EXPR)
+    addr_object_size (osi, var, object_size_type, size, wholesize);
+  else
+    *size = *wholesize = size_unknown (object_size_type);
+}
 
 /* Compute object_sizes for VAR, defined at STMT, which is
    a COND_EXPR.  Return true if the object size might need reexamination
@@ -1042,6 +1406,33 @@  cond_expr_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   then_ = gimple_assign_rhs2 (stmt);
   else_ = gimple_assign_rhs3 (stmt);
 
+  if (object_size_type & OST_DYNAMIC)
+    {
+      tree then_size, then_wholesize, else_size, else_wholesize;
+
+      dynamic_object_size (osi, then_, &then_size, &then_wholesize);
+      if (!size_unknown_p (then_size, object_size_type))
+	dynamic_object_size (osi, else_, &else_size, &else_wholesize);
+
+      tree cond_size, cond_wholesize;
+      if (size_unknown_p (then_size, object_size_type)
+	  || size_unknown_p (else_size, object_size_type))
+	cond_size = cond_wholesize = size_unknown (object_size_type);
+      else
+	{
+	  cond_size = fold_build3 (COND_EXPR, sizetype,
+				   gimple_assign_rhs1 (stmt),
+				   then_size, else_size);
+	  cond_wholesize = fold_build3 (COND_EXPR, sizetype,
+					gimple_assign_rhs1 (stmt),
+					then_wholesize, else_wholesize);
+	}
+
+      object_sizes_set (osi, varno, cond_size, cond_wholesize);
+
+      return false;
+    }
+
   if (TREE_CODE (then_) == SSA_NAME)
     reexamine |= merge_object_sizes (osi, var, then_);
   else
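
(An aside illustrating the OST_DYNAMIC branch added to cond_expr_object_size
above; a sketch, not part of the patch's tests.)

char a[8], b[33];

__SIZE_TYPE__
__attribute__ ((noinline))
cond_expr_size (int cond)
{
  /* Depending on earlier passes this reaches the objsz pass either as a
     COND_EXPR assignment or as a PHI; on the COND_EXPR path the new code
     builds roughly  cond ? 8 : 33  as the size instead of a MIN/MAX
     estimate.  */
  char *p = cond ? a : b;

  return __builtin_dynamic_object_size (p, 0);
}
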
@@ -1058,6 +1449,44 @@  cond_expr_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   return reexamine;
 }
 
+/* Compute an object size expression for VAR, which is the result of a PHI
+   node.  */
+
+static void
+phi_dynamic_object_size (struct object_size_info *osi, tree var)
+{
+  int object_size_type = osi->object_size_type;
+  unsigned int varno = SSA_NAME_VERSION (var);
+  gimple *stmt = SSA_NAME_DEF_STMT (var);
+  unsigned i, num_args = gimple_phi_num_args (stmt);
+
+  /* The extra space is for the PHI result at the end, which object_sizes_set
+     sets for us.  */
+  tree sizes = make_tree_vec (num_args + 1);
+  tree wholesizes = make_tree_vec (num_args + 1);
+
+  /* Bail out if the size of any of the PHI arguments cannot be
+     determined.  */
+  for (i = 0; i < num_args; i++)
+    {
+      tree rhs = gimple_phi_arg_def (stmt, i);
+      tree size, wholesize;
+
+      dynamic_object_size (osi, rhs, &size, &wholesize);
+
+      if (size_unknown_p (size, object_size_type))
+       break;
+
+      TREE_VEC_ELT (sizes, i) = size;
+      TREE_VEC_ELT (wholesizes, i) = wholesize;
+    }
+
+  if (i < num_args)
+    sizes = wholesizes = size_unknown (object_size_type);
+
+  object_sizes_set (osi, varno, sizes, wholesizes);
+}
+
 /* Compute object sizes for VAR.
    For ADDR_EXPR an object size is the number of remaining bytes
    to the end of the object (where what is considered an object depends on
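
The early break above can be seen with a sketch like this (not part of the
patch's tests): one PHI argument whose size cannot be determined makes the
whole result unknown.

__SIZE_TYPE__
__attribute__ ((noinline))
phi_with_unknown_arm (int cond, char *unknown)
{
  char buf[32];
  char *p;

  if (cond)
    p = buf;
  else
    p = unknown;

  /* No size can be determined for UNKNOWN, so phi_dynamic_object_size bails
     out and the result degrades to size_unknown, i.e. (__SIZE_TYPE__) -1 for
     the maximum-size case.  */
  return __builtin_dynamic_object_size (p, 0);
}
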
@@ -1103,6 +1532,9 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
 	{
 	  /* Found a dependency loop.  Mark the variable for later
 	     re-examination.  */
+	  if (object_size_type & OST_DYNAMIC)
+	    object_sizes_set_temp (osi, varno);
+
 	  bitmap_set_bit (osi->reexamine, varno);
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
@@ -1184,6 +1616,12 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
       {
 	unsigned i;
 
+	if (object_size_type & OST_DYNAMIC)
+	  {
+	    phi_dynamic_object_size (osi, var);
+	    break;
+	  }
+
 	for (i = 0; i < gimple_phi_num_args (stmt); i++)
 	  {
 	    tree rhs = gimple_phi_arg (stmt, i)->def;
@@ -1206,7 +1644,8 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
   if (! reexamine || object_sizes_unknown_p (object_size_type, varno))
     {
       bitmap_set_bit (computed[object_size_type], varno);
-      bitmap_clear_bit (osi->reexamine, varno);
+      if (!(object_size_type & OST_DYNAMIC))
+	bitmap_clear_bit (osi->reexamine, varno);
     }
   else
     {