@@ -1273,7 +1273,8 @@ ANALYZER_OBJS = \
analyzer/store.o \
analyzer/supergraph.o \
analyzer/svalue.o \
- analyzer/trimmed-graph.o
+ analyzer/trimmed-graph.o \
+ analyzer/trust-boundaries.o
# Language-independent object files.
# We put the *-match.o and insn-*.o files first so that a parallel make
@@ -66,6 +66,10 @@ Wanalyzer-exposure-through-output-file
Common Var(warn_analyzer_exposure_through_output_file) Init(1) Warning
Warn about code paths in which sensitive data is written to a file.
+Wanalyzer-exposure-through-uninit-copy
+Common Var(warn_analyzer_exposure_through_uninit_copy) Init(1) Warning
+Warn about code paths in which sensitive data is copied across a security boundary.
+
Wanalyzer-file-leak
Common Var(warn_analyzer_file_leak) Init(1) Warning
Warn about code paths in which a stdio FILE is not closed.
@@ -81,6 +81,8 @@ event_kind_to_string (enum event_kind ek)
return "EK_CUSTOM";
case EK_STMT:
return "EK_STMT";
+ case EK_REGION_CREATION:
+ return "EK_REGION_CREATION";
case EK_FUNCTION_ENTRY:
return "EK_FUNCTION_ENTRY";
case EK_STATE_CHANGE:
@@ -199,6 +201,79 @@ statement_event::get_desc (bool) const
return label_text::take (xstrdup (pp_formatted_text (&pp)));
}
+/* class region_creation_event : public checker_event. */
+
+region_creation_event::region_creation_event (const region *reg,
+ tree capacity,
+ enum rce_kind kind,
+ location_t loc,
+ tree fndecl,
+ int depth)
+: checker_event (EK_REGION_CREATION, loc, fndecl, depth),
+ m_reg (reg),
+ m_capacity (capacity),
+ m_rce_kind (kind)
+{
+ if (m_rce_kind == RCE_CAPACITY)
+ gcc_assert (capacity);
+}
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ region_creation_event.
+   There are effectively 3 kinds of region_creation_event, to
+   avoid combinatorial explosion from trying to convey the
+ information in a single message. */
+
+label_text
+region_creation_event::get_desc (bool can_colorize) const
+{
+ switch (m_rce_kind)
+ {
+ default:
+ gcc_unreachable ();
+
+ case RCE_MEM_SPACE:
+ switch (m_reg->get_memory_space ())
+ {
+ default:
+ return label_text::borrow ("region created here");
+ case MEMSPACE_STACK:
+ return label_text::borrow ("region created on stack here");
+ case MEMSPACE_HEAP:
+ return label_text::borrow ("region created on heap here");
+ }
+ break;
+
+ case RCE_CAPACITY:
+ gcc_assert (m_capacity);
+ if (TREE_CODE (m_capacity) == INTEGER_CST)
+ {
+ unsigned HOST_WIDE_INT hwi = tree_to_uhwi (m_capacity);
+ if (hwi == 1)
+ return make_label_text (can_colorize,
+ "capacity: %wu byte", hwi);
+ else
+ return make_label_text (can_colorize,
+ "capacity: %wu bytes", hwi);
+ }
+ else
+ return make_label_text (can_colorize,
+ "capacity: %qE bytes", m_capacity);
+
+ case RCE_DEBUG:
+ {
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_string (&pp, "region creation: ");
+ m_reg->dump_to_pp (&pp, true);
+ if (m_capacity)
+ pp_printf (&pp, " capacity: %qE", m_capacity);
+ return label_text::take (xstrdup (pp_formatted_text (&pp)));
+ }
+ break;
+ }
+}
+
/* class function_entry_event : public checker_event. */
/* Implementation of diagnostic_event::get_desc vfunc for
@@ -991,6 +1066,35 @@ checker_path::debug () const
}
}
+/* Add region_creation_event instances to this path for REG,
+ describing whether REG is on the stack or heap and what
+ its capacity is (if known).
+ If DEBUG is true, also create an RCE_DEBUG event. */
+
+void
+checker_path::add_region_creation_events (const region *reg,
+ const region_model *model,
+ location_t loc,
+ tree fndecl, int depth,
+ bool debug)
+{
+ tree capacity = NULL_TREE;
+ if (model)
+ if (const svalue *capacity_sval = model->get_capacity (reg))
+ capacity = model->get_representative_tree (capacity_sval);
+
+ add_event (new region_creation_event (reg, capacity, RCE_MEM_SPACE,
+ loc, fndecl, depth));
+
+ if (capacity)
+ add_event (new region_creation_event (reg, capacity, RCE_CAPACITY,
+ loc, fndecl, depth));
+
+ if (debug)
+ add_event (new region_creation_event (reg, capacity, RCE_DEBUG,
+ loc, fndecl, depth));
+}
+
/* Add a warning_event to the end of this path. */
void
@@ -31,6 +31,7 @@ enum event_kind
EK_DEBUG,
EK_CUSTOM,
EK_STMT,
+ EK_REGION_CREATION,
EK_FUNCTION_ENTRY,
EK_STATE_CHANGE,
EK_START_CFG_EDGE,
@@ -58,6 +59,7 @@ extern const char *event_kind_to_string (enum event_kind ek);
custom_event (EK_CUSTOM)
precanned_custom_event
statement_event (EK_STMT)
+ region_creation_event (EK_REGION_CREATION)
function_entry_event (EK_FUNCTION_ENTRY)
state_change_event (EK_STATE_CHANGE)
superedge_event
@@ -194,6 +196,45 @@ public:
const program_state m_dst_state;
};
+/* There are too many combinations to express region creation in one message,
+ so we emit multiple region_creation_event instances when each pertinent
+ region is created.
+
+ This enum distinguishes between the different messages. */
+
+enum rce_kind
+{
+ /* Generate a message based on the memory space of the region
+ e.g. "region created on stack here". */
+ RCE_MEM_SPACE,
+
+ /* Generate a message based on the capacity of the region
+ e.g. "capacity: 100 bytes". */
+ RCE_CAPACITY,
+
+ /* Generate a debug message. */
+ RCE_DEBUG
+};
+
+/* A concrete event subclass describing the creation of a region that
+ is significant for a diagnostic. */
+
+class region_creation_event : public checker_event
+{
+public:
+ region_creation_event (const region *reg,
+ tree capacity,
+ enum rce_kind kind,
+ location_t loc, tree fndecl, int depth);
+
+ label_text get_desc (bool) const FINAL OVERRIDE;
+
+private:
+ const region *m_reg;
+ tree m_capacity;
+ enum rce_kind m_rce_kind;
+};
+
/* An event subclass describing the entry to a function. */
class function_entry_event : public checker_event
@@ -561,6 +602,12 @@ public:
m_events[idx] = new_event;
}
+ void add_region_creation_events (const region *reg,
+ const region_model *model,
+ location_t loc,
+ tree fndecl, int depth,
+ bool debug);
+
void add_final_event (const state_machine *sm,
const exploded_node *enode, const gimple *stmt,
tree var, state_machine::state_t state);
@@ -1219,7 +1219,7 @@ diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
trailing eedge stashed, add any events for it. This is for use
in handling longjmp, to show where a longjmp is rewinding to. */
if (sd.m_trailing_eedge)
- add_events_for_eedge (pb, *sd.m_trailing_eedge, &emission_path);
+ add_events_for_eedge (pb, *sd.m_trailing_eedge, &emission_path, NULL);
emission_path.prepare_for_emission (sd.m_d);
@@ -1266,10 +1266,13 @@ diagnostic_manager::build_emission_path (const path_builder &pb,
checker_path *emission_path) const
{
LOG_SCOPE (get_logger ());
+
+ interesting_t interest;
+ pb.get_pending_diagnostic ()->mark_interesting_stuff (&interest);
for (unsigned i = 0; i < epath.m_edges.length (); i++)
{
const exploded_edge *eedge = epath.m_edges[i];
- add_events_for_eedge (pb, *eedge, emission_path);
+ add_events_for_eedge (pb, *eedge, emission_path, &interest);
}
}
@@ -1577,10 +1580,12 @@ struct null_assignment_sm_context : public sm_context
void
diagnostic_manager::add_events_for_eedge (const path_builder &pb,
const exploded_edge &eedge,
- checker_path *emission_path) const
+ checker_path *emission_path,
+ interesting_t *interest) const
{
const exploded_node *src_node = eedge.m_src;
const program_point &src_point = src_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
const exploded_node *dst_node = eedge.m_dest;
const program_point &dst_point = dst_node->get_point ();
const int dst_stack_depth = dst_point.get_stack_depth ();
@@ -1642,6 +1647,29 @@ diagnostic_manager::add_events_for_eedge (const path_builder &pb,
(dst_point.get_supernode ()->get_start_location (),
dst_point.get_fndecl (),
dst_stack_depth));
+ /* Create region_creation_events for on-stack regions within
+ this frame. */
+ if (interest)
+ {
+ unsigned i;
+ const region *reg;
+ FOR_EACH_VEC_ELT (interest->m_region_creation, i, reg)
+ if (const frame_region *frame = reg->maybe_get_frame_region ())
+ if (frame->get_fndecl () == dst_point.get_fndecl ())
+ {
+ const region *base_reg = reg->get_base_region ();
+ if (tree decl = base_reg->maybe_get_decl ())
+ if (DECL_SOURCE_LOCATION (decl) != UNKNOWN_LOCATION)
+ {
+ emission_path->add_region_creation_events
+ (reg, dst_state.m_region_model,
+ DECL_SOURCE_LOCATION (decl),
+ dst_point.get_fndecl (),
+ dst_stack_depth,
+ m_verbosity > 3);
+ }
+ }
+ }
}
break;
case PK_BEFORE_STMT:
@@ -1697,6 +1725,43 @@ diagnostic_manager::add_events_for_eedge (const path_builder &pb,
== dst_node->m_succs[0]->m_dest->get_point ())))
break;
}
+
+ /* Look for changes in dynamic extents, which will identify
+ the creation of heap-based regions and alloca regions. */
+ if (interest)
+ {
+ const region_model *src_model = src_state.m_region_model;
+ const region_model *dst_model = dst_state.m_region_model;
+ if (src_model->get_dynamic_extents ()
+ != dst_model->get_dynamic_extents ())
+ {
+ unsigned i;
+ const region *reg;
+ FOR_EACH_VEC_ELT (interest->m_region_creation, i, reg)
+ {
+ const region *base_reg = reg->get_base_region ();
+ const svalue *old_extents
+ = src_model->get_dynamic_extents (base_reg);
+ const svalue *new_extents
+ = dst_model->get_dynamic_extents (base_reg);
+ if (old_extents == NULL && new_extents != NULL)
+ switch (base_reg->get_kind ())
+ {
+ default:
+ break;
+ case RK_HEAP_ALLOCATED:
+ case RK_ALLOCA:
+ emission_path->add_region_creation_events
+ (reg, dst_model,
+ src_point.get_location (),
+ src_point.get_fndecl (),
+ src_stack_depth,
+ m_verbosity > 3);
+ break;
+ }
+ }
+ }
+ }
}
}
break;
@@ -2001,6 +2066,10 @@ diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
}
break;
+ case EK_REGION_CREATION:
+ /* Don't filter these. */
+ break;
+
case EK_FUNCTION_ENTRY:
if (m_verbosity < 1)
{
@@ -141,7 +141,8 @@ private:
void add_events_for_eedge (const path_builder &pb,
const exploded_edge &eedge,
- checker_path *emission_path) const;
+ checker_path *emission_path,
+ interesting_t *interest) const;
bool significant_edge_p (const path_builder &pb,
const exploded_edge &eedge) const;
@@ -33,11 +33,41 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-event-id.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
+#include "selftest.h"
+#include "tristate.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
#if ENABLE_ANALYZER
namespace ana {
+/* struct interesting_t. */
+
+void
+interesting_t::add_region_creation (const region *reg)
+{
+ gcc_assert (reg);
+ m_region_creation.safe_push (reg);
+}
+
+void
+interesting_t::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ pp_string (pp, "{ region creation: [");
+ unsigned i;
+ const region *reg;
+ FOR_EACH_VEC_ELT (m_region_creation, i, reg)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ reg->dump_to_pp (pp, simple);
+ }
+ pp_string (pp, "]}");
+}
+
/* Generate a label_text by printing FMT.
Use a clone of the global_dc for formatting callbacks.
@@ -23,6 +23,22 @@ along with GCC; see the file COPYING3. If not see
namespace ana {
+/* A bundle of information about things that are of interest to a
+ pending_diagnostic.
+
+ For now, merely the set of regions that are pertinent to the
+ diagnostic, so that we can notify the user about when they
+ were created. */
+
+struct interesting_t
+{
+ void add_region_creation (const region *reg);
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const;
+
+ auto_vec<const region *> m_region_creation;
+};
+
/* Various bundles of information used for generating more precise
messages for events within a diagnostic_path, for passing to the
various "describe_*" vfuncs of pending_diagnostic. See those
@@ -282,6 +298,14 @@ class pending_diagnostic
{
return false;
}
+
+ /* Vfunc for registering additional information of interest to this
+ diagnostic. */
+
+ virtual void mark_interesting_stuff (interesting_t *)
+ {
+ /* Default no-op implementation. */
+ }
};
/* A template to make it easier to make subclasses of pending_diagnostic.
@@ -80,6 +80,17 @@ call_details::call_details (const gcall *call, region_model *model,
}
}
+/* Get any logger associated with this object. */
+
+logger *
+call_details::get_logger () const
+{
+ if (m_ctxt)
+ return m_ctxt->get_logger ();
+ else
+ return NULL;
+}
+
/* Get any uncertainty_t associated with the region_model_context. */
uncertainty_t *
@@ -67,6 +67,7 @@ along with GCC; see the file COPYING3. If not see
#include "stor-layout.h"
#include "attribs.h"
#include "tree-object-size.h"
+#include "analyzer/call-info.h"
#if ENABLE_ANALYZER
@@ -1041,6 +1042,445 @@ region_model::on_stmt_pre (const gimple *stmt,
}
}
+/* An enum for capturing the presence of one of
+   __attribute__ ((returns_zero_on_failure)) or
+   __attribute__ ((returns_zero_on_success)). */
+
+enum return_meaning
+{
+ RETURN_MEANING_UNKNOWN, // or no return value
+ RETURN_MEANING_ZERO_ON_FAILURE,
+ RETURN_MEANING_ZERO_ON_SUCCESS
+};
+
+/* Determine if FNDECL has been marked with one of
+   __attribute__ ((returns_zero_on_failure)) or
+   __attribute__ ((returns_zero_on_success)). */
+
+static enum return_meaning
+get_return_meaning (tree fndecl)
+{
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (fndecl));
+ if (lookup_attribute ("returns_zero_on_failure", attrs))
+ return RETURN_MEANING_ZERO_ON_FAILURE;
+ if (lookup_attribute ("returns_zero_on_success", attrs))
+ return RETURN_MEANING_ZERO_ON_SUCCESS;
+ return RETURN_MEANING_UNKNOWN;
+}
+
+/* Update this model for an outcome of a call that returns zero. */
+
+void
+region_model::update_for_zero_return (const call_details &cd)
+{
+ if (!cd.get_lhs_type ())
+ return;
+ const svalue *zero
+ = m_mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
+ /* Need to make this unmergeable to prevent the state-merger code
+ from merging the success and failure outcomes. */
+ set_value (cd.get_lhs_region (),
+ m_mgr->get_or_create_unmergeable (zero),
+ cd.get_ctxt ());
+}
+
+/* Update this model for an outcome of a call that returns non-zero. */
+
+void
+region_model::update_for_nonzero_return (const call_details &cd)
+{
+ if (!cd.get_lhs_type ())
+ return;
+ const svalue *zero
+ = m_mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
+ const svalue *result
+ = get_store_value (cd.get_lhs_region (), cd.get_ctxt ());
+ add_constraint (result, NE_EXPR, zero, cd.get_ctxt ());
+}
+
+/* Subclass of call_info for a call that succeeds or fails, where
+   the return value is zero or nonzero to signify success/failure,
+   adding a "when `FNDECL' succeeds/fails, returning zero/nonzero"
+   message (giving four combinations: success vs failure, zero vs nonzero).
+
+ This is still abstract: the custom_edge_info::update_model vfunc
+ must be implemented. */
+
+class maybe_returns_zero_call_info : public call_info
+{
+public:
+ maybe_returns_zero_call_info (const call_details &cd,
+ enum return_meaning return_meaning,
+ bool success)
+ : call_info (cd),
+ m_return_meaning (return_meaning),
+ m_success (success)
+ {}
+
+ label_text get_desc (bool can_colorize) const
+ {
+ switch (m_return_meaning)
+ {
+ default:
+ case RETURN_MEANING_UNKNOWN:
+ gcc_unreachable ();
+ break;
+ case RETURN_MEANING_ZERO_ON_FAILURE:
+ if (m_success)
+ return make_label_text (can_colorize,
+ "when %qE succeeds, returning non-zero",
+ get_fndecl ());
+ else
+ return make_label_text (can_colorize,
+ "when %qE fails, returning zero",
+ get_fndecl ());
+ case RETURN_MEANING_ZERO_ON_SUCCESS:
+ if (m_success)
+ return make_label_text (can_colorize,
+ "when %qE succeeds, returning zero",
+ get_fndecl ());
+ else
+ return make_label_text (can_colorize,
+ "when %qE fails, returning non-zero",
+ get_fndecl ());
+ }
+ }
+
+protected:
+ void update_model_for_return_value (region_model *model,
+ region_model_context *ctxt) const
+ {
+ const call_details cd (get_call_details (model, ctxt));
+
+ /* Update return value. */
+ switch (m_return_meaning)
+ {
+ default:
+ case RETURN_MEANING_UNKNOWN:
+ gcc_unreachable ();
+ break;
+ case RETURN_MEANING_ZERO_ON_FAILURE:
+ if (m_success)
+ model->update_for_nonzero_return (cd);
+ else
+ model->update_for_zero_return (cd);
+ break;
+ case RETURN_MEANING_ZERO_ON_SUCCESS:
+ if (m_success)
+ model->update_for_zero_return (cd);
+ else
+ model->update_for_nonzero_return (cd);
+ break;
+ }
+ }
+
+ enum return_meaning m_return_meaning;
+ bool m_success;
+};
+
+/* Concrete custom_edge_info: a copy that succeeds,
+ with an appropriate return value. */
+
+class copy_success : public maybe_returns_zero_call_info
+{
+public:
+ copy_success (const call_details &cd,
+ enum return_meaning return_meaning,
+ const region *sized_dest_reg,
+ const svalue *copied_sval,
+ const region *sized_src_reg)
+ : maybe_returns_zero_call_info (cd, return_meaning, true),
+ m_sized_dest_reg (sized_dest_reg),
+ m_copied_sval (copied_sval),
+ m_sized_src_reg (sized_src_reg)
+ {}
+
+ bool update_model (region_model *model,
+ const exploded_edge *,
+ region_model_context *ctxt) const FINAL OVERRIDE
+ {
+ update_model_for_return_value (model, ctxt);
+ model->set_value (m_sized_dest_reg, m_copied_sval, ctxt, m_sized_src_reg);
+ return true;
+ }
+
+ const region *m_sized_dest_reg;
+ const svalue *m_copied_sval;
+ const region *m_sized_src_reg;
+};
+
+/* Concrete custom_edge_info: a copy that fails,
+ with an appropriate return value. */
+
+class copy_failure : public maybe_returns_zero_call_info
+{
+public:
+ copy_failure (const call_details &cd,
+ enum return_meaning return_meaning)
+ : maybe_returns_zero_call_info (cd, return_meaning, false)
+ {}
+
+ bool update_model (region_model *model,
+ const exploded_edge *,
+ region_model_context *ctxt) const FINAL OVERRIDE
+ {
+ update_model_for_return_value (model, ctxt);
+ /* Leave the destination region untouched. */
+ return true;
+ }
+
+
+  /* No additional state: the base class's m_return_meaning and
+     m_success capture everything this outcome needs.  */
+
+/* The Linux kernel commonly uses
+ min_t([unsigned] long, VAR, sizeof(T));
+ to set an upper bound on the size of a copy_to_user.
+ Attempt to simplify such sizes by trying to get the upper bound as a
+ constant.
+ Return the simplified svalue if possible, or NULL otherwise. */
+
+static const svalue *
+maybe_simplify_upper_bound (const svalue *num_bytes_sval,
+ region_model_manager *mgr)
+{
+ tree type = num_bytes_sval->get_type ();
+ while (const svalue *raw = num_bytes_sval->maybe_undo_cast ())
+ num_bytes_sval = raw;
+ if (const binop_svalue *binop_sval = num_bytes_sval->dyn_cast_binop_svalue ())
+ if (binop_sval->get_op () == MIN_EXPR)
+ if (binop_sval->get_arg1 ()->get_kind () == SK_CONSTANT)
+ {
+ return mgr->get_or_create_cast (type, binop_sval->get_arg1 ());
+ /* TODO: we might want to also capture the constraint
+ when recording the diagnostic, or note that we're using
+ the upper bound. */
+ }
+ return NULL;
+}
+
+/* Attempt to get an upper bound for the size of a copy when simulating a
+ copy function.
+
+ NUM_BYTES_SVAL is the symbolic value for the size of the copy.
+ Use it if it's constant, otherwise try to simplify it. Failing
+ that, use the size of SRC_REG if constant.
+
+ Return a symbolic value for an upper limit on the number of bytes
+ copied, or NULL if no such value could be determined. */
+
+const svalue *
+region_model::maybe_get_copy_bounds (const region *src_reg,
+ const svalue *num_bytes_sval)
+{
+ if (num_bytes_sval->maybe_get_constant ())
+ return num_bytes_sval;
+
+ if (const svalue *simplified
+ = maybe_simplify_upper_bound (num_bytes_sval, m_mgr))
+ num_bytes_sval = simplified;
+
+ if (num_bytes_sval->maybe_get_constant ())
+ return num_bytes_sval;
+
+ /* For now, try just guessing the size as the capacity of the
+ base region of the src.
+ This is a hack; we might get too large a value. */
+ const region *src_base_reg = src_reg->get_base_region ();
+ num_bytes_sval = get_capacity (src_base_reg);
+
+ if (num_bytes_sval->maybe_get_constant ())
+ return num_bytes_sval;
+
+ /* Non-constant: give up. */
+ return NULL;
+}
+
+/* Support for "copy functions".
+
+ Heuristic: if a function has just an access((read_only)),
+ access((write_only)) and a size param (in any order), assume it copies
+ between buffers, and has no side effects. */
+
+/* Struct for capturing the order of the params of a copy function. */
+
+struct copy_fn_details
+{
+ int m_src_arg_idx;
+ int m_dst_arg_idx;
+ int m_sz_arg_idx;
+};
+
+/* Return true if FNDECL is a copy function and populate *OUT.
+ Return false otherwise. */
+
+static bool
+is_copy_function (tree fndecl, copy_fn_details *out)
+{
+ tree fntype = TREE_TYPE (fndecl);
+ /* Must have exactly 3 args. */
+ if (type_num_arguments (fntype) != 3)
+ return false;
+ /* Reject variadic functions. */
+ if (type_argument_type (fntype, 4) == NULL_TREE)
+ return false;
+
+ rdwr_map rwm;
+ init_attr_rdwr_indices (&rwm, TYPE_ATTRIBUTES (fntype));
+
+ if (rwm.elements () != 3)
+ return false;
+
+ int src_ptr_arg_idx = -1;
+ int src_sz_arg_idx = -1;
+ int dst_ptr_arg_idx = -1;
+ int dst_sz_arg_idx = -1;
+
+ for (unsigned arg_idx = 0; arg_idx < 3; arg_idx++)
+ {
+ const attr_access *access = rwm.get (arg_idx);
+ if (!access)
+ return false;
+ switch (access->mode)
+ {
+ default:
+ gcc_unreachable ();
+ case access_none:
+ return false;
+
+ case access_read_only:
+ if (src_ptr_arg_idx != -1
+ && src_ptr_arg_idx != (int)access->ptrarg)
+ /* More than one read_only. */
+ return false;
+ src_ptr_arg_idx = access->ptrarg;
+ src_sz_arg_idx = access->sizarg;
+ break;
+
+ case access_write_only:
+ if (dst_ptr_arg_idx != -1
+ && dst_ptr_arg_idx != (int)access->ptrarg)
+ /* More than one write_only. */
+ return false;
+ dst_ptr_arg_idx = access->ptrarg;
+ dst_sz_arg_idx = access->sizarg;
+ break;
+
+ case access_read_write:
+ case access_deferred:
+ return false;
+ }
+ }
+
+ if (src_ptr_arg_idx == -1
+ || src_sz_arg_idx == -1
+ || dst_ptr_arg_idx == -1
+ || dst_sz_arg_idx == -1
+ || src_sz_arg_idx != dst_sz_arg_idx
+ || src_ptr_arg_idx == src_sz_arg_idx)
+ return false;
+
+ /* The size param must have integer type; this is checked by the attribute
+ handler.
+
+ type_argument_type takes a 1-based argno, rather than a 0-based arg
+ idx. */
+ gcc_assert (INTEGRAL_TYPE_P (type_argument_type (fntype,
+ src_sz_arg_idx + 1)));
+
+ /* If we get here we have a pair:
+ access (read_only, A), access (write_only, A). */
+ out->m_src_arg_idx = src_ptr_arg_idx;
+ out->m_dst_arg_idx = dst_ptr_arg_idx;
+ out->m_sz_arg_idx = src_sz_arg_idx;
+ return true;
+}
+
+/* Update this model assuming that CD is a call to CALLEE_FNDECL, a
+ copy function described by CFD. */
+
+void
+region_model::handle_copy_function (tree callee_fndecl,
+ const call_details &cd,
+ const copy_fn_details &cfd)
+{
+ LOG_SCOPE (cd.get_logger ());
+
+ gcc_assert (callee_fndecl);
+ enum return_meaning return_meaning = get_return_meaning (callee_fndecl);
+
+ const svalue *dest_sval = cd.get_arg_svalue (cfd.m_dst_arg_idx);
+ const svalue *src_sval = cd.get_arg_svalue (cfd.m_src_arg_idx);
+ const svalue *num_bytes_sval = cd.get_arg_svalue (cfd.m_sz_arg_idx);
+
+ const region *dest_reg = deref_rvalue (dest_sval,
+ cd.get_arg_tree (cfd.m_dst_arg_idx),
+ cd.get_ctxt ());
+ const region *src_reg = deref_rvalue (src_sval,
+ cd.get_arg_tree (cfd.m_src_arg_idx),
+ cd.get_ctxt ());
+
+ if (const svalue * bounded_sval = maybe_get_copy_bounds (src_reg,
+ num_bytes_sval))
+ num_bytes_sval = bounded_sval;
+
+ if (tree cst = num_bytes_sval->maybe_get_constant ())
+ if (zerop (cst))
+ /* No-op. */
+ return;
+
+ const region *sized_src_reg = m_mgr->get_sized_region (src_reg,
+ NULL_TREE,
+ num_bytes_sval);
+
+ const svalue *copied_sval = get_store_value (sized_src_reg, cd.get_ctxt ());
+
+ const region *sized_dest_reg = m_mgr->get_sized_region (dest_reg,
+ NULL_TREE,
+ num_bytes_sval);
+
+ /* Heuristic for handling copies that can fail.
+
+ In the Linux kernel, the functions copy_to_user and copy_from_user copy
+ an arbitrary amount of data to/from userspace. They return the amount
+ of uncopied data (i.e. zero means success, nonzero means failure).
+
+ Support this kind of copy function by bifurcating the state into
+ success (all of N was copied) and failure (none was copied). We
+ don't bother with the "only a fraction was copied" case.
+
+ This heuristic should help find problems in error-handling without
+     overcomplicating the analysis. */
+ if (cd.get_ctxt ())
+ {
+ switch (return_meaning)
+ {
+ default:
+ gcc_unreachable ();
+ case RETURN_MEANING_UNKNOWN:
+ /* Don't bifurcate state; assume a full copy. */
+ set_value (sized_dest_reg, copied_sval, cd.get_ctxt (),
+ sized_src_reg);
+ break;
+
+ case RETURN_MEANING_ZERO_ON_FAILURE:
+ case RETURN_MEANING_ZERO_ON_SUCCESS:
+ {
+ /* Bifurcate state, creating a "failure" out-edge. */
+ cd.get_ctxt ()->bifurcate (new copy_failure (cd, return_meaning));
+
+ /* The "unbifurcated" state is the "success" case. */
+ copy_success success (cd, return_meaning,
+ sized_dest_reg,
+ copied_sval,
+ sized_src_reg);
+ success.update_model (this, NULL, cd.get_ctxt ());
+ }
+ break;
+ }
+ }
+}
+
/* Update this model for the CALL stmt, using CTXT to report any
diagnostics - the first half.
@@ -1092,6 +1532,8 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
{
+ copy_fn_details cfd;
+
/* The various impl_call_* member functions are implemented
in region-model-impl-calls.cc.
Having them split out into separate functions makes it easier
@@ -1247,6 +1689,11 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
{
/* Handle in "on_call_post". */
}
+ else if (is_copy_function (callee_fndecl, &cfd))
+ {
+ handle_copy_function (callee_fndecl, cd, cfd);
+ return false;
+ }
else if (!fndecl_has_gimple_body_p (callee_fndecl)
&& !DECL_PURE_P (callee_fndecl)
&& !fndecl_built_in_p (callee_fndecl))
@@ -2336,17 +2783,23 @@ region_model::check_region_for_read (const region *src_reg,
/* Set the value of the region given by LHS_REG to the value given
by RHS_SVAL.
- Use CTXT to report any warnings associated with writing to LHS_REG. */
+ Use CTXT to report any warnings associated with writing to LHS_REG.
+ SRC_REG can be NULL, if non-NULL it's a hint about where RHS_SVAL
+ came from, for precision-of-wording in diagnostics. */
void
region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
- region_model_context *ctxt)
+ region_model_context *ctxt,
+ const region *src_reg)
{
gcc_assert (lhs_reg);
gcc_assert (rhs_sval);
check_region_for_write (lhs_reg, ctxt);
+ if (ctxt && lhs_reg->untrusted_p ())
+ maybe_complain_about_infoleak (lhs_reg, rhs_sval, src_reg, ctxt);
+
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
ctxt ? ctxt->get_uncertainty () : NULL);
}
@@ -479,6 +479,8 @@ public:
region_model_context *ctxt);
region_model_context *get_ctxt () const { return m_ctxt; }
+ logger *get_logger () const;
+
uncertainty_t *get_uncertainty () const;
tree get_lhs_type () const { return m_lhs_type; }
const region *get_lhs_region () const { return m_lhs_region; }
@@ -509,6 +511,8 @@ private:
const region *m_lhs_region;
};
+struct copy_fn_details;
+
/* A region_model encapsulates a representation of the state of memory, with
a tree of regions, along with their associated values.
The representation is graph-like because values can be pointers to
@@ -591,6 +595,13 @@ class region_model
void impl_call_operator_new (const call_details &cd);
void impl_call_operator_delete (const call_details &cd);
void impl_deallocation_call (const call_details &cd);
+ void handle_copy_function (tree callee_fndecl,
+ const call_details &cd,
+ const copy_fn_details &cfd);
+ const svalue *maybe_get_copy_bounds (const region *src_reg,
+ const svalue *num_bytes_sval);
+ void update_for_zero_return (const call_details &cd);
+ void update_for_nonzero_return (const call_details &cd);
void handle_unrecognized_call (const gcall *call,
region_model_context *ctxt);
@@ -648,7 +659,8 @@ class region_model
region_model_context *ctxt) const;
void set_value (const region *lhs_reg, const svalue *rhs_sval,
- region_model_context *ctxt);
+ region_model_context *ctxt,
+ const region *src_reg = NULL);
void set_value (tree lhs, tree rhs, region_model_context *ctxt);
void clobber_region (const region *reg);
void purge_region (const region *reg);
@@ -813,6 +825,11 @@ class region_model
void check_region_for_read (const region *src_reg,
region_model_context *ctxt) const;
+ void maybe_complain_about_infoleak (const region *dst_reg,
+ const svalue *copied_sval,
+ const region *src_reg,
+ region_model_context *ctxt);
+
/* Storing this here to avoid passing it around everywhere. */
region_model_manager *const m_mgr;
@@ -204,6 +204,34 @@ region::get_memory_space () const
return MEMSPACE_UNKNOWN;
}
+/* Get the address space of this region. */
+
+addr_space_t
+region::get_addr_space () const
+{
+ const region *iter = this;
+ while (iter)
+ {
+ if (iter->m_type)
+ return TYPE_ADDR_SPACE (iter->m_type);
+ switch (iter->get_kind ())
+ {
+ case RK_FIELD:
+ case RK_ELEMENT:
+ case RK_OFFSET:
+ case RK_SIZED:
+ iter = iter->get_parent_region ();
+ continue;
+ case RK_CAST:
+ iter = iter->dyn_cast_cast_region ()->get_original_region ();
+ continue;
+ default:
+ return ADDR_SPACE_GENERIC;
+ }
+ }
+ return ADDR_SPACE_GENERIC;
+}
+
/* Subroutine for use by region_model_manager::get_or_create_initial_value.
Return true if this region has an initial_svalue.
Return false if attempting to use INIT_VAL(this_region) should give
@@ -136,6 +136,7 @@ public:
bool descendent_of_p (const region *elder) const;
const frame_region *maybe_get_frame_region () const;
enum memory_space get_memory_space () const;
+ addr_space_t get_addr_space () const;
bool can_have_initial_svalue_p () const;
tree maybe_get_decl () const;
@@ -189,6 +190,8 @@ public:
const complexity &get_complexity () const { return m_complexity; }
+ bool untrusted_p () const;
+
protected:
region (complexity c, unsigned id, const region *parent, tree type);
@@ -296,6 +299,7 @@ public:
/* Accessors. */
const frame_region *get_calling_frame () const { return m_calling_frame; }
function *get_function () const { return m_fun; }
+ tree get_fndecl () const { return get_function ()->decl; }
int get_index () const { return m_index; }
int get_stack_depth () const { return m_index + 1; }
@@ -558,6 +558,7 @@ public:
hashval_t hash () const;
bool symbolic_p () const;
+ const region *get_base_region () const { return m_base_region; }
void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
void dump (bool simple) const;
new file mode 100644
@@ -0,0 +1,615 @@
+/* Handling of trust boundaries
+ Copyright (C) 2019-2021 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#define INCLUDE_UNIQUE_PTR
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "diagnostic-core.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-metadata.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "function.h"
+#include "json.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "digraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "gcc-rich-location.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* Information about the layout of a RECORD_TYPE, captured as a vector
+   of items, where each item is either a field or padding.  */
+
+class record_layout
+{
+public:
+ /* An item within a record; either a field, or padding after a field. */
+ struct item
+ {
+ public:
+ item (const bit_range &br,
+ tree field,
+ bool is_padding)
+ : m_bit_range (br),
+ m_field (field),
+ m_is_padding (is_padding)
+ {
+ }
+
+ bit_offset_t get_start_bit_offset () const
+ {
+ return m_bit_range.get_start_bit_offset ();
+ }
+ bit_offset_t get_next_bit_offset () const
+ {
+ return m_bit_range.get_next_bit_offset ();
+ }
+
+ bool contains_p (bit_offset_t offset) const
+ {
+ return m_bit_range.contains_p (offset);
+ }
+
+ void dump_to_pp (pretty_printer *pp) const
+ {
+ if (m_is_padding)
+ pp_printf (pp, "padding after %qD", m_field);
+ else
+ pp_printf (pp, "%qD", m_field);
+ pp_string (pp, ", ");
+ m_bit_range.dump_to_pp (pp);
+ }
+
+ bit_range m_bit_range;
+ tree m_field;
+ bool m_is_padding;
+ };
+
+ record_layout (tree record_type)
+ : m_record_type (record_type)
+ {
+ gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
+
+ for (tree iter = TYPE_FIELDS (record_type); iter != NULL_TREE;
+ iter = DECL_CHAIN (iter))
+ {
+ if (TREE_CODE (iter) == FIELD_DECL)
+ {
+ int iter_field_offset = int_bit_position (iter);
+ bit_size_t size_in_bits;
+ if (!int_size_in_bits (TREE_TYPE (iter), &size_in_bits))
+ size_in_bits = 0;
+
+ maybe_pad_to (iter_field_offset);
+
+ /* Add field. */
+ m_items.safe_push (item (bit_range (iter_field_offset,
+ size_in_bits),
+ iter, false));
+ }
+ }
+
+ /* Add any trailing padding. */
+ bit_size_t size_in_bits;
+ if (int_size_in_bits (record_type, &size_in_bits))
+ maybe_pad_to (size_in_bits);
+ }
+
+ void dump_to_pp (pretty_printer *pp) const
+ {
+ unsigned i;
+ item *it;
+ FOR_EACH_VEC_ELT (m_items, i, it)
+ {
+ it->dump_to_pp (pp);
+ pp_newline (pp);
+ }
+ }
+
+ DEBUG_FUNCTION void dump () const
+ {
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+ }
+
+ const record_layout::item *get_item_at (bit_offset_t offset) const
+ {
+ unsigned i;
+ item *it;
+ FOR_EACH_VEC_ELT (m_items, i, it)
+ if (it->contains_p (offset))
+ return it;
+ return NULL;
+ }
+
+private:
+ /* Subroutine of ctor. Add padding item to NEXT_OFFSET if necessary. */
+
+ void maybe_pad_to (bit_offset_t next_offset)
+ {
+ if (m_items.length () > 0)
+ {
+ const item &last_item = m_items[m_items.length () - 1];
+ bit_offset_t offset_after_last_item
+ = last_item.get_next_bit_offset ();
+ if (next_offset > offset_after_last_item)
+ {
+ bit_size_t padding_size
+ = next_offset - offset_after_last_item;
+ m_items.safe_push (item (bit_range (offset_after_last_item,
+ padding_size),
+ last_item.m_field, true));
+ }
+ }
+ }
+
+ tree m_record_type;
+ auto_vec<item> m_items;
+};
+
+/* A subclass of pending_diagnostic for complaining about uninitialized data
+ being copied across a trust boundary to an untrusted output
+ (e.g. copy_to_user infoleaks in the Linux kernel). */
+
+class exposure_through_uninit_copy
+ : public pending_diagnostic_subclass<exposure_through_uninit_copy>
+{
+public:
+ exposure_through_uninit_copy (const region *src_region,
+ const region *dest_region,
+ const svalue *copied_sval,
+ region_model_manager *mgr)
+ : m_src_region (src_region),
+ m_dest_region (dest_region),
+ m_copied_sval (copied_sval),
+ m_mgr (mgr)
+ {
+ gcc_assert (m_copied_sval->get_kind () == SK_POISONED
+ || m_copied_sval->get_kind () == SK_COMPOUND);
+ }
+
+ const char *get_kind () const FINAL OVERRIDE
+ {
+ return "exposure_through_uninit_copy";
+ }
+
+ bool operator== (const exposure_through_uninit_copy &other) const
+ {
+ return (m_src_region == other.m_src_region
+ && m_dest_region == other.m_dest_region
+ && m_copied_sval == other.m_copied_sval);
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ /* CWE-200: Exposure of Sensitive Information to an Unauthorized Actor. */
+ m.add_cwe (200);
+ enum memory_space mem_space = get_src_memory_space ();
+ bool warned;
+ switch (mem_space)
+ {
+ default:
+ warned = warning_meta
+ (rich_loc, m,
+ OPT_Wanalyzer_exposure_through_uninit_copy,
+ "potential exposure of sensitive information"
+ " by copying uninitialized data across trust boundary");
+ break;
+ case MEMSPACE_STACK:
+ warned = warning_meta
+ (rich_loc, m,
+ OPT_Wanalyzer_exposure_through_uninit_copy,
+ "potential exposure of sensitive information"
+ " by copying uninitialized data from stack across trust boundary");
+ break;
+ case MEMSPACE_HEAP:
+ warned = warning_meta
+ (rich_loc, m,
+ OPT_Wanalyzer_exposure_through_uninit_copy,
+ "potential exposure of sensitive information"
+ " by copying uninitialized data from heap across trust boundary");
+ break;
+ }
+ if (warned)
+ {
+ location_t loc = rich_loc->get_loc ();
+ inform_number_of_uninit_bits (loc);
+ complain_about_uninit_ranges (loc);
+
+ if (mem_space == MEMSPACE_STACK)
+ maybe_emit_fixit_hint ();
+ }
+ return warned;
+ }
+
+ label_text describe_final_event (const evdesc::final_event &) FINAL OVERRIDE
+ {
+ enum memory_space mem_space = get_src_memory_space ();
+ switch (mem_space)
+ {
+ default:
+ return label_text::borrow ("uninitialized data copied here");
+
+ case MEMSPACE_STACK:
+ return label_text::borrow ("uninitialized data copied from stack here");
+
+ case MEMSPACE_HEAP:
+ return label_text::borrow ("uninitialized data copied from heap here");
+ }
+ }
+
+ void mark_interesting_stuff (interesting_t *interest) FINAL OVERRIDE
+ {
+ if (m_src_region)
+ interest->add_region_creation (m_src_region);
+ }
+
+private:
+ enum memory_space get_src_memory_space () const
+ {
+ return m_src_region ? m_src_region->get_memory_space () : MEMSPACE_UNKNOWN;
+ }
+
+ bit_size_t calc_num_uninit_bits () const
+ {
+ switch (m_copied_sval->get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+ break;
+ case SK_POISONED:
+ {
+ const poisoned_svalue *poisoned_sval
+ = as_a <const poisoned_svalue *> (m_copied_sval);
+ gcc_assert (poisoned_sval->get_poison_kind () == POISON_KIND_UNINIT);
+
+	/* Give up if we don't have type information.  */
+ if (m_copied_sval->get_type () == NULL_TREE)
+ return 0;
+
+ bit_size_t size_in_bits;
+ if (int_size_in_bits (m_copied_sval->get_type (), &size_in_bits))
+ return size_in_bits;
+
+ /* Give up if we can't get the size of the type. */
+ return 0;
+ }
+ break;
+ case SK_COMPOUND:
+ {
+ const compound_svalue *compound_sval
+ = as_a <const compound_svalue *> (m_copied_sval);
+ bit_size_t result = 0;
+ /* Find keys for uninit svals. */
+ for (auto iter : *compound_sval)
+ {
+ const svalue *sval = iter.second;
+ if (const poisoned_svalue *psval
+ = sval->dyn_cast_poisoned_svalue ())
+ if (psval->get_poison_kind () == POISON_KIND_UNINIT)
+ {
+ const binding_key *key = iter.first;
+ const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ();
+ gcc_assert (ckey);
+ result += ckey->get_size_in_bits ();
+ }
+ }
+ return result;
+ }
+ }
+ }
+
+ void inform_number_of_uninit_bits (location_t loc) const
+ {
+ bit_size_t num_uninit_bits = calc_num_uninit_bits ();
+ if (num_uninit_bits <= 0)
+ return;
+ if (num_uninit_bits % BITS_PER_UNIT == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_uninit_bytes = num_uninit_bits / BITS_PER_UNIT;
+ if (num_uninit_bytes == 1)
+ inform (loc, "1 byte is uninitialized");
+ else
+ inform (loc,
+ "%wu bytes are uninitialized", num_uninit_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_uninit_bits == 1)
+ inform (loc, "1 bit is uninitialized");
+ else
+ inform (loc,
+ "%wu bits are uninitialized", num_uninit_bits.to_uhwi ());
+ }
+ }
+
+ void complain_about_uninit_ranges (location_t loc) const
+ {
+ if (const compound_svalue *compound_sval
+ = m_copied_sval->dyn_cast_compound_svalue ())
+ {
+ /* Find keys for uninit svals. */
+ auto_vec<const concrete_binding *> uninit_keys;
+ for (auto iter : *compound_sval)
+ {
+ const svalue *sval = iter.second;
+ if (const poisoned_svalue *psval
+ = sval->dyn_cast_poisoned_svalue ())
+ if (psval->get_poison_kind () == POISON_KIND_UNINIT)
+ {
+ const binding_key *key = iter.first;
+ const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ();
+ gcc_assert (ckey);
+ uninit_keys.safe_push (ckey);
+ }
+ }
+ /* Complain about them in sorted order. */
+ uninit_keys.qsort (concrete_binding::cmp_ptr_ptr);
+
+ std::unique_ptr<record_layout> layout;
+
+ tree type = m_copied_sval->get_type ();
+ if (type && TREE_CODE (type) == RECORD_TYPE)
+ {
+ // (std::make_unique is C++14)
+ layout = std::unique_ptr<record_layout> (new record_layout (type));
+
+ if (0)
+ layout->dump ();
+ }
+
+ unsigned i;
+ const concrete_binding *ckey;
+ FOR_EACH_VEC_ELT (uninit_keys, i, ckey)
+ {
+ bit_offset_t start_bit = ckey->get_start_bit_offset ();
+ bit_offset_t next_bit = ckey->get_next_bit_offset ();
+ complain_about_uninit_range (loc, start_bit, next_bit,
+ layout.get ());
+ }
+ }
+ }
+
+ void complain_about_uninit_range (location_t loc,
+ bit_offset_t start_bit,
+ bit_offset_t next_bit,
+ const record_layout *layout) const
+ {
+ if (layout)
+ {
+ while (start_bit < next_bit)
+ {
+ if (const record_layout::item *item
+ = layout->get_item_at (start_bit))
+ {
+ gcc_assert (start_bit >= item->get_start_bit_offset ());
+ gcc_assert (start_bit < item->get_next_bit_offset ());
+ if (item->get_start_bit_offset () == start_bit
+ && item->get_next_bit_offset () <= next_bit)
+ complain_about_fully_uninit_item (*item);
+ else
+ complain_about_partially_uninit_item (*item);
+ start_bit = item->get_next_bit_offset ();
+ continue;
+ }
+ else
+ break;
+ }
+ }
+
+ if (start_bit >= next_bit)
+ return;
+
+ if (start_bit % 8 == 0 && next_bit % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_offset_t start_byte = start_bit / 8;
+ byte_offset_t last_byte = (next_bit / 8) - 1;
+ if (last_byte == start_byte)
+ inform (loc,
+ "byte %wu is uninitialized",
+ start_byte.to_uhwi ());
+ else
+ inform (loc,
+ "bytes %wu - %wu are uninitialized",
+ start_byte.to_uhwi (),
+ last_byte.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ bit_offset_t last_bit = next_bit - 1;
+ if (last_bit == start_bit)
+ inform (loc,
+ "bit %wu is uninitialized",
+ start_bit.to_uhwi ());
+ else
+ inform (loc,
+ "bits %wu - %wu are uninitialized",
+ start_bit.to_uhwi (),
+ last_bit.to_uhwi ());
+ }
+ }
+
+ static void
+ complain_about_fully_uninit_item (const record_layout::item &item)
+ {
+ tree field = item.m_field;
+ bit_size_t num_bits = item.m_bit_range.m_size_in_bits;
+ if (item.m_is_padding)
+ {
+ if (num_bits % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_bytes = num_bits / BITS_PER_UNIT;
+ if (num_bytes == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (1 byte)",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (%wu bytes)",
+ field, num_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_bits == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (1 bit)",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (%wu bits)",
+ field, num_bits.to_uhwi ());
+ }
+ }
+ else
+ {
+ if (num_bits % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_bytes = num_bits / BITS_PER_UNIT;
+ if (num_bytes == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (1 byte)", field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (%wu bytes)",
+ field, num_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_bits == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (1 bit)", field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (%wu bits)",
+ field, num_bits.to_uhwi ());
+ }
+ }
+ }
+
+ static void
+ complain_about_partially_uninit_item (const record_layout::item &item)
+ {
+ tree field = item.m_field;
+ if (item.m_is_padding)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is partially uninitialized",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is partially uninitialized",
+ field);
+ /* TODO: ideally we'd describe what parts are uninitialized. */
+ }
+
+ void maybe_emit_fixit_hint () const
+ {
+ if (tree decl = m_src_region->maybe_get_decl ())
+ {
+ gcc_rich_location hint_richloc (DECL_SOURCE_LOCATION (decl));
+ hint_richloc.add_fixit_insert_after (" = {0}");
+ inform (&hint_richloc,
+ "suggest forcing zero-initialization by"
+ " providing a %<{0}%> initializer");
+ }
+ }
+
+private:
+ const region *m_src_region;
+ const region *m_dest_region;
+ const svalue *m_copied_sval;
+ region_model_manager *m_mgr;
+};
+
+/* Return true if any part of SVAL is uninitialized. */
+
+static bool
+contains_uninit_p (const svalue *sval)
+{
+ struct uninit_finder : public visitor
+ {
+ public:
+ uninit_finder () : m_found_uninit (false) {}
+ void visit_poisoned_svalue (const poisoned_svalue *sval)
+ {
+ if (sval->get_poison_kind () == POISON_KIND_UNINIT)
+ m_found_uninit = true;
+ }
+ bool m_found_uninit;
+ };
+
+ uninit_finder v;
+ sval->accept (&v);
+
+ return v.m_found_uninit;
+}
+
+/* Subroutine of region_model::set_value, called when setting DST_REG
+ when writing through an untrusted pointer (and thus crossing a security
+ boundary).
+
+ Check that COPIED_SVAL is fully initialized. If not, complain about
+ an infoleak to CTXT.
+
+ SRC_REG can be NULL; if non-NULL it is used as a hint in the diagnostic
+ as to where COPIED_SVAL came from. */
+
+void
+region_model::maybe_complain_about_infoleak (const region *dst_reg,
+ const svalue *copied_sval,
+ const region *src_reg,
+ region_model_context *ctxt)
+{
+ /* Check for exposure. */
+ if (contains_uninit_p (copied_sval))
+ ctxt->warn (new exposure_through_uninit_copy (src_reg,
+ dst_reg,
+ copied_sval,
+ m_mgr));
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
@@ -438,6 +438,7 @@ Objective-C and Objective-C++ Dialects}.
-Wno-analyzer-double-fclose @gol
-Wno-analyzer-double-free @gol
-Wno-analyzer-exposure-through-output-file @gol
+-Wno-analyzer-exposure-through-uninit-copy @gol
-Wno-analyzer-file-leak @gol
-Wno-analyzer-free-of-non-heap @gol
-Wno-analyzer-malloc-leak @gol
@@ -9393,6 +9394,7 @@ Enabling this option effectively enables the following warnings:
-Wanalyzer-double-fclose @gol
-Wanalyzer-double-free @gol
-Wanalyzer-exposure-through-output-file @gol
+-Wanalyzer-exposure-through-uninit-copy @gol
-Wanalyzer-file-leak @gol
-Wanalyzer-free-of-non-heap @gol
-Wanalyzer-malloc-leak @gol
@@ -9461,6 +9463,18 @@ This diagnostic warns for paths through the code in which a
security-sensitive value is written to an output file
(such as writing a password to a log file).
+@item Wanalyzer-exposure-through-uninit-copy
+@opindex Wanalyzer-exposure-through-uninit-copy
+@opindex Wno-analyzer-exposure-through-uninit-copy
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-exposure-through-uninit-copy}
+to disable it.
+
+This diagnostic warns for ``infoleaks'' - paths through the code in which
+uninitialized values are copied across a security boundary
+(such as copying a partially-initialized struct on the stack
+to an untrusted custom address space).
+
@item -Wno-analyzer-file-leak
@opindex Wanalyzer-file-leak
@opindex Wno-analyzer-file-leak
new file mode 100644
@@ -0,0 +1,98 @@
+#include "analyzer-decls.h"
+
+extern void copy_fn_always_succeeds (void *to, const void *from, long n)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3)
+ ));
+
+extern int copy_fn_zero_on_success (void *to, const void *from, long n)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3),
+ returns_zero_on_success
+ ));
+
+extern int copy_fn_zero_on_failure (void *to, const void *from, long n)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3),
+ returns_zero_on_failure
+ ));
+
+void
+test_1 (int a)
+{
+ int b;
+ copy_fn_always_succeeds (&b, &a, sizeof (a));
+ __analyzer_eval (a == b); /* { dg-warning "TRUE" } */
+}
+
+int
+test_2 (int a)
+{
+ int b = 42;
+ int r = copy_fn_zero_on_success (&b, &a, sizeof (a));
+ if (r)
+ /* Failure. */
+ __analyzer_eval (b == 42); /* { dg-warning "TRUE" } */
+ else
+ /* Success. */
+ __analyzer_eval (b == a); /* { dg-warning "TRUE" } */
+ return r;
+}
+
+int
+test_3 (int a)
+{
+ int b = 42;
+ int r = copy_fn_zero_on_failure (&b, &a, sizeof (a));
+ if (r)
+ /* Success. */
+ __analyzer_eval (b == a); /* { dg-warning "TRUE" } */
+ else
+ /* Failure. */
+ __analyzer_eval (b == 42); /* { dg-warning "TRUE" } */
+ return r;
+}
+
+/* Different param order. */
+
+extern int copy_fn_zero_on_failure_2 (long n, const void *from, void *to)
+ __attribute__((returns_zero_on_failure,
+ access (read_only, 2, 1),
+ access (write_only, 3, 1)));
+int
+test_4 (int a)
+{
+ int b = 42;
+ int r = copy_fn_zero_on_failure_2 (sizeof (a), &a, &b);
+ if (r)
+ /* Success. */
+ __analyzer_eval (b == a); /* { dg-warning "TRUE" } */
+ else
+ /* Failure. */
+ __analyzer_eval (b == 42); /* { dg-warning "TRUE" } */
+ return r;
+}
+
+/* Not a copy-fn: too many arguments. */
+extern int too_many_args (void *to, const void *from, long n, int i)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3)));
+
+int test_5 (int a)
+{
+ int b;
+ too_many_args (&b, &a, sizeof (a), 17);
+ __analyzer_eval (a == b); /* { dg-warning "UNKNOWN" } */
+}
+
+/* Not a copy-fn: variadic. */
+extern int variadic (void *to, const void *from, long n, ...)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3)));
+
+int test_6 (int a)
+{
+ int b;
+ variadic (&b, &a, sizeof (a));
+ __analyzer_eval (a == b); /* { dg-warning "UNKNOWN" } */
+}
new file mode 100644
@@ -0,0 +1,45 @@
+typedef __SIZE_TYPE__ size_t;
+
+#define __user
+
+extern int copy_from_user(void *to, const void __user *from, long n)
+ __attribute__((access (write_only, 1, 3),
+ access (read_only, 2, 3)
+ ));
+
+#define EFAULT 14
+#define EINVAL 22
+
+/* Taken from Linux: fs/binfmt_misc.c (GPL-2.0-only). */
+
+int parse_command(const char __user *buffer, size_t count)
+{
+ char s[4];
+
+ if (count > 3)
+ return -EINVAL;
+ if (copy_from_user(s, buffer, count))
+ return -EFAULT;
+ if (!count)
+ return 0;
+ if (s[count - 1] == '\n') /* { dg-bogus "uninit" } */
+ count--;
+ if (count == 1 && s[0] == '0') /* { dg-bogus "uninit" } */
+ return 1;
+ if (count == 1 && s[0] == '1') /* { dg-bogus "uninit" } */
+ return 2;
+ if (count == 2 && s[0] == '-' && s[1] == '1') /* { dg-bogus "uninit" } */
+ return 3;
+ return -EINVAL;
+}
+
+/* Not using return value from copy_from_user. */
+
+int test_2 (const char __user *buffer, size_t count)
+{
+ char s[4];
+ if (count > 3)
+ return -EINVAL;
+ copy_from_user(s, buffer, count);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,181 @@
+#include <string.h>
+
+#include "test-uaccess.h"
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+
+struct s1
+{
+ u32 i;
+};
+
+void test_1a (void __user *dst, u32 a)
+{
+ struct s1 s;
+ s.i = a;
+ copy_to_user(dst, &s, sizeof (struct s1)); /* { dg-bogus "" } */
+}
+
+void test_1b (void __user *dst, u32 a)
+{
+ struct s1 s;
+ copy_to_user(dst, &s, sizeof (struct s1)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "4 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+}
+
+void test_1c (void __user *dst, u32 a)
+{
+ struct s1 s;
+ memset (&s, 0, sizeof (struct s1));
+ copy_to_user(dst, &s, sizeof (struct s1)); /* { dg-bogus "" } */
+}
+
+void test_1d (void __user *dst, u32 a)
+{
+ struct s1 s = {0};
+ copy_to_user(dst, &s, sizeof (struct s1)); /* { dg-bogus "" } */
+}
+
+struct s2
+{
+ u32 i;
+ u32 j; /* { dg-message "field 'j' is uninitialized \\(4 bytes\\)" } */
+};
+
+void test_2a (void __user *dst, u32 a)
+{
+ struct s2 s; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 8 bytes" "capacity" { target *-*-* } .-1 } */
+ s.i = a;
+ copy_to_user(dst, &s, sizeof (struct s2)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "4 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+}
+
+void test_2b (void __user *dst, u32 a)
+{
+ struct s2 s;
+ s.i = a;
+ /* Copy with wrong size (only part of s2). */
+ copy_to_user(dst, &s, sizeof (struct s1));
+}
+
+void test_2d (void __user *dst, u32 a)
+{
+ struct s2 s = {0};
+ s.i = a;
+  copy_to_user(dst, &s, sizeof (struct s2)); /* { dg-bogus "" } */
+}
+
+struct empty {};
+
+void test_empty (void __user *dst)
+{
+ struct empty e;
+ copy_to_user(dst, &e, sizeof (struct empty));
+}
+
+union un_a
+{
+ u32 i;
+ u8 j;
+};
+
+/* As above, but in a different order. */
+
+union un_b
+{
+ u8 j;
+ u32 i;
+};
+
+void test_union_1a (void __user *dst, u8 v)
+{
+ union un_a u; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 4 bytes" "capacity" { target *-*-* } .-1 } */
+ u.j = v;
+ copy_to_user(dst, &u, sizeof (union un_a)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ /* { dg-message "bytes 1 - 3 are uninitialized" "note how much" { target *-*-* } .-2 } */
+}
+
+void test_union_1b (void __user *dst, u8 v)
+{
+ union un_b u; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 4 bytes" "capacity" { target *-*-* } .-1 } */
+ u.j = v;
+ copy_to_user(dst, &u, sizeof (union un_b)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ /* { dg-message "bytes 1 - 3 are uninitialized" "note how much" { target *-*-* } .-2 } */
+}
+
+void test_union_2a (void __user *dst, u8 v)
+{
+ union un_a u = {0};
+ u.j = v;
+ copy_to_user(dst, &u, sizeof (union un_a));
+}
+
+void test_union_2b (void __user *dst, u8 v)
+{
+ union un_b u = {0};
+ u.j = v;
+ copy_to_user(dst, &u, sizeof (union un_b));
+}
+
+void test_union_3a (void __user *dst, u32 v)
+{
+ union un_a u;
+ u.i = v;
+ copy_to_user(dst, &u, sizeof (union un_a)); /* { dg-bogus "" } */
+}
+
+void test_union_3b (void __user *dst, u32 v)
+{
+ union un_b u;
+ u.i = v;
+ copy_to_user(dst, &u, sizeof (union un_b)); /* { dg-bogus "" } */
+}
+
+void test_union_4a (void __user *dst, u8 v)
+{
+ union un_a u = {0};
+ copy_to_user(dst, &u, sizeof (union un_a)); /* { dg-bogus "" } */
+}
+
+void test_union_4b (void __user *dst, u8 v)
+{
+ union un_b u = {0};
+ copy_to_user(dst, &u, sizeof (union un_b)); /* { dg-bogus "" } */
+}
+
+struct st_union_5
+{
+ union {
+ u8 f1;
+ u32 f2;
+ } u; /* { dg-message "field 'u' is partially uninitialized" } */
+};
+
+void test_union_5 (void __user *dst, u8 v)
+{
+ struct st_union_5 st; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 4 bytes" "capacity" { target *-*-* } .-1 } */
+
+ /* This write only initializes the u8 within the union "u",
+ leaving the remaining 3 bytes uninitialized. */
+ st.u.f1 = v;
+
+ copy_to_user (dst, &st, sizeof(st)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+}
+
+void test_one_byte (void __user *dst)
+{
+ char src; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 1 byte" "capacity" { target *-*-* } .-1 } */
+
+ copy_to_user (dst, &src, sizeof(src)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "1 byte is uninitialized" "note how much" { target *-*-* } .-1 } */
+}
new file mode 100644
@@ -0,0 +1,29 @@
+#include <string.h>
+
+#include "test-uaccess.h"
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+
+/* Coverage for the various singular and plural forms of bits, bytes, and fields vs padding. */
+
+struct st
+{
+ u32 a; /* { dg-message "field 'a' is uninitialized \\(4 bytes\\)" } */
+ int b:1; /* { dg-message "field 'b' is uninitialized \\(1 bit\\)" "field" } */
+ /* { dg-message "padding after field 'b' is uninitialized \\(7 bits\\)" "padding" { target *-*-* } .-1 } */
+ u8 d; /* { dg-message "field 'd' is uninitialized \\(1 byte\\)" } */
+ int c:7; /* { dg-message "padding after field 'c' is uninitialized \\(9 bits\\)" } */
+ u16 e; /* { dg-message "padding after field 'e' is uninitialized \\(2 bytes\\)" } */
+};
+
+void test (void __user *dst, u16 v)
+{
+ struct st s; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 12 bytes" "capacity" { target *-*-* } .-1 } */
+ /* { dg-message "suggest forcing zero-initialization by providing a '\\{0\\}' initializer" "fix-it" { target *-*-* } .-2 } */
+ s.e = v;
+ copy_to_user(dst, &s, sizeof (struct st)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "10 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+}
new file mode 100644
@@ -0,0 +1,141 @@
+/* Verify that -Wanalyzer-exposure-through-uninit-copy doesn't get confused
+ if size argument to copy_to_user is an upper bound, rather than a
+ constant. */
+
+#include "analyzer-decls.h"
+
+typedef __SIZE_TYPE__ size_t;
+
+#include "test-uaccess.h"
+
+typedef unsigned __INT32_TYPE__ u32;
+
+/* min_t adapted from include/linux/kernel.h. */
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1: __min2; })
+
+struct st
+{
+ u32 a;
+ u32 b;
+};
+
+/* Verify that we cope with min_t. */
+
+void test_1_full_init (void __user *dst, u32 x, u32 y, unsigned long in_sz)
+{
+ struct st s;
+ s.a = x;
+ s.b = y;
+ unsigned long copy_sz = min_t(unsigned long, in_sz, sizeof(s));
+ copy_to_user(dst, &s, copy_sz); /* { dg-bogus "exposure" } */
+}
+
+void test_1_partial_init (void __user *dst, u32 x, u32 y, unsigned long in_sz)
+{
+ struct st s;
+ s.a = x;
+  /* s.b not initialized.  */
+ unsigned long copy_sz = min_t(unsigned long, in_sz, sizeof(s));
+ copy_to_user(dst, &s, copy_sz); /* { dg-warning "exposure" } */
+}
+
+/* Constant on LHS rather than RHS. */
+
+void test_2_full_init (void __user *dst, u32 x, u32 y, unsigned long in_sz)
+{
+ struct st s;
+ s.a = x;
+ s.b = y;
+ unsigned long copy_sz = min_t(unsigned long, sizeof(s), in_sz);
+ copy_to_user(dst, &s, copy_sz); /* { dg-bogus "exposure" } */
+}
+
+void test_2_partial_init (void __user *dst, u32 x, u32 y, unsigned long in_sz)
+{
+ struct st s;
+ s.a = x;
+  /* s.b not initialized.  */
+ unsigned long copy_sz = min_t(unsigned long, sizeof(s), in_sz);
+ copy_to_user(dst, &s, copy_sz); /* { dg-warning "exposure" } */
+}
+
+/* min_t with various casts. */
+
+void test_3_full_init (void __user *dst, u32 x, u32 y, int in_sz)
+{
+ struct st s;
+ s.a = x;
+ s.b = y;
+ int copy_sz = min_t(unsigned int, in_sz, sizeof(s));
+ copy_to_user(dst, &s, copy_sz); /* { dg-bogus "exposure" } */
+}
+
+void test_3_partial_init (void __user *dst, u32 x, u32 y, int in_sz)
+{
+ struct st s;
+ s.a = x;
+  /* s.b not initialized.  */
+ int copy_sz = min_t(unsigned int, in_sz, sizeof(s));
+ copy_to_user(dst, &s, copy_sz); /* { dg-warning "exposure" } */
+}
+
+/* Comparison against an upper bound. */
+
+void test_4_full_init (void __user *dst, u32 x, u32 y, size_t in_sz)
+{
+ struct st s;
+ s.a = x;
+ s.b = y;
+
+ size_t copy_sz = in_sz;
+ if (copy_sz > sizeof(s))
+ copy_sz = sizeof(s);
+
+ copy_to_user(dst, &s, copy_sz); /* { dg-bogus "exposure" } */
+}
+
+void test_4_partial_init (void __user *dst, u32 x, u32 y, size_t in_sz)
+{
+ struct st s;
+ s.a = x;
+  /* s.b not initialized.  */
+
+ size_t copy_sz = in_sz;
+ if (copy_sz > sizeof(s))
+ copy_sz = sizeof(s);
+
+ copy_to_user(dst, &s, copy_sz); /* { dg-warning "exposure" } */
+}
+
+/* Comparison against an upper bound with casts. */
+
+void test_5_full_init (void __user *dst, u32 x, u32 y, int in_sz)
+{
+ struct st s;
+ s.a = x;
+ s.b = y;
+
+ int copy_sz = in_sz;
+ if (copy_sz > sizeof(s))
+ copy_sz = sizeof(s);
+ copy_to_user(dst, &s, copy_sz); /* { dg-bogus "exposure" } */
+}
+
+/* Comparison against an upper bound with casts. */
+
+void test_5_partial_init (void __user *dst, u32 x, u32 y, int in_sz)
+{
+ struct st s;
+ s.a = x;
+  /* s.b not initialized.  */
+
+ int copy_sz = in_sz;
+ if (copy_sz > sizeof(s))
+ copy_sz = sizeof(s);
+
+ copy_to_user(dst, &s, copy_sz); /* { dg-warning "exposure" "" } */
+}
new file mode 100644
@@ -0,0 +1,35 @@
+#include "test-uaccess.h"
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+
+/* As per infoleak-1.c, but doing it here via deref assignment,
+ rather than copy_to_user. */
+
+struct s1
+{
+ u32 i;
+};
+
+void test_1a (struct s1 __user *dst, u32 a)
+{
+ struct s1 s;
+ s.i = a;
+ *dst = s;
+}
+
+union un_a
+{
+ u32 i;
+ u8 j;
+};
+
+void test_union_1a (union un_a __user *dst, u8 v)
+{
+ union un_a u;
+ u.j = v;
+ *dst = u; /* { dg-warning "potential exposure of sensitive information by copying uninitialized data across trust boundary" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ /* { dg-message "bytes 1 - 3 are uninitialized" "note how much" { target *-*-* } .-2 } */
+}
new file mode 100644
@@ -0,0 +1,134 @@
+/* "The sco_sock_getsockopt_old function in net/bluetooth/sco.c in the
+ Linux kernel before 2.6.39 does not initialize a certain structure,
+ which allows local users to obtain potentially sensitive information
+ from kernel stack memory via the SCO_CONNINFO option."
+
+ Fixed e.g. by c4c896e1471aec3b004a693c689f60be3b17ac86 on linux-2.6.39.y
+ in linux-stable. */
+
+#include <string.h>
+
+typedef unsigned char __u8;
+typedef unsigned short __u16;
+
+#include "test-uaccess.h"
+
+/* Adapted from include/asm-generic/uaccess.h. */
+
+#define get_user(x, ptr) \
+({ \
+ /* [...snip...] */ \
+ __get_user_fn(sizeof (*(ptr)), ptr, &(x)); \
+ /* [...snip...] */ \
+})
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ size = copy_from_user(x, ptr, size);
+ return size ? -1 : size;
+}
+
+/* Adapted from include/linux/kernel.h. */
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1: __min2; })
+
+/* Adapted from include/linux/net.h. */
+
+struct socket {
+ /* [...snip...] */
+ struct sock *sk;
+ /* [...snip...] */
+};
+
+/* Adapted from include/net/bluetooth/sco.h. */
+
+struct sco_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3]; /* { dg-message "padding after field 'dev_class' is uninitialized \\(1 byte\\)" } */
+};
+
+struct sco_conn {
+
+ struct hci_conn *hcon;
+ /* [...snip...] */
+};
+
+#define sco_pi(sk) ((struct sco_pinfo *) sk)
+
+struct sco_pinfo {
+ /* [...snip...] */
+ struct sco_conn *conn;
+};
+
+/* Adapted from include/net/bluetooth/hci_core.h. */
+
+struct hci_conn {
+ /* [...snip...] */
+ __u16 handle;
+ /* [...snip...] */
+ __u8 dev_class[3];
+ /* [...snip...] */
+};
+
+/* Adapted from sco_sock_getsockopt_old in net/bluetooth/sco.c. */
+
+static int sco_sock_getsockopt_old_broken(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ /* [...snip...] */
+ struct sco_conninfo cinfo; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 6 bytes" "capacity" { target *-*-* } .-1 } */
+ /* Note: 40 bits of fields, padded to 48. */
+
+ int len, err = 0;
+
+ /* [...snip...] */
+
+ if (get_user(len, optlen))
+ return -1;
+
+ /* [...snip...] */
+
+ /* case SCO_CONNINFO: */
+ cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+
+ len = min_t(unsigned int, len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *)&cinfo, len)) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" { target *-*-* } } */
+ /* { dg-message "1 byte is uninitialized" "how much note" { target *-*-* } .-1 } */
+ err = -1;
+
+ /* [...snip...] */
+}
+
+static int sco_sock_getsockopt_fixed(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ /* [...snip...] */
+ struct sco_conninfo cinfo;
+ /* Note: 40 bits of fields, padded to 48. */
+
+ int len, err = 0;
+
+ /* [...snip...] */
+
+ if (get_user(len, optlen))
+ return -1;
+
+ /* [...snip...] */
+
+ /* case SCO_CONNINFO: */
+ /* Infoleak fixed by this memset call. */
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+
+ len = min_t(unsigned int, len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *)&cinfo, len)) /* { dg-bogus "exposure" } */
+ err = -1;
+
+ /* [...snip...] */
+}
new file mode 100644
@@ -0,0 +1,42 @@
+/* Simplified versions of infoleak-CVE-2011-1078-1.c. */
+
+#include <string.h>
+
+typedef unsigned char __u8;
+typedef unsigned short __u16;
+
+#include "test-uaccess.h"
+
+/* Adapted from include/net/bluetooth/sco.h. */
+
+struct sco_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3]; /* { dg-message "padding after field 'dev_class' is uninitialized \\(1 byte\\)" } */
+};
+
+/* Adapted from sco_sock_getsockopt_old in net/bluetooth/sco.c. */
+
+int test_1 (char __user *optval, const struct sco_conninfo *in)
+{
+ struct sco_conninfo cinfo; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 6 bytes" "capacity" { target *-*-* } .-1 } */
+ /* Note: 40 bits of fields, padded to 48. */
+
+ cinfo.hci_handle = in->hci_handle;
+ memcpy(cinfo.dev_class, in->dev_class, 3);
+
+ copy_to_user(optval, &cinfo, sizeof(cinfo)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "1 byte is uninitialized" "how much note" { target *-*-* } .-1 } */
+}
+
+int test_2 (char __user *optval, const struct sco_conninfo *in)
+{
+ struct sco_conninfo cinfo;
+ /* Note: 40 bits of fields, padded to 48. */
+
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = in->hci_handle;
+ memcpy(cinfo.dev_class, in->dev_class, 3);
+
+ copy_to_user(optval, &cinfo, sizeof(cinfo)); /* { dg-bogus "" } */
+}
new file mode 100644
@@ -0,0 +1,117 @@
+/* "The yam_ioctl function in drivers/net/hamradio/yam.c in the Linux kernel
+ before 3.12.8 does not initialize a certain structure member, which allows
+ local users to obtain sensitive information from kernel memory by
+ leveraging the CAP_NET_ADMIN capability for an SIOCYAMGCFG ioctl call."
+
+ Fixed e.g. by e7834c71c2cacc621ddc64bd71f83ef2054f6539 on linux-3.12.y
+ in linux-stable. */
+
+#include <string.h>
+
+#include "test-uaccess.h"
+
+/* Adapted from include/linux/yam.h */
+
+struct yamcfg {
+ unsigned int mask; /* Mask of commands */
+ unsigned int iobase; /* IO Base of COM port */
+ unsigned int irq; /* IRQ of COM port */
+ unsigned int bitrate; /* Bit rate of radio port */
+ unsigned int baudrate; /* Baud rate of the RS232 port */
+ unsigned int txdelay; /* TxDelay */
+ unsigned int txtail; /* TxTail */
+ unsigned int persist; /* Persistence */
+ unsigned int slottime; /* Slottime */
+ unsigned int mode; /* mode 0 (simp), 1(Dupl), 2(Dupl+delay) */
+ unsigned int holddly; /* PTT delay in FullDuplex 2 mode */
+};
+
+struct yamdrv_ioctl_cfg {
+ int cmd; /* { dg-message "field 'cmd' is uninitialized \\(4 bytes\\)" } */
+ struct yamcfg cfg;
+};
+
+/* Adapted from include/asm-generic/errno-base.h */
+
+#define EFAULT 14 /* Bad address */
+
+/* Adapted from drivers/net/hamradio/yam.c */
+
+struct yam_port {
+ /* [...snip...] */
+
+ int bitrate;
+ int baudrate;
+ int iobase;
+ int irq;
+ int dupmode;
+
+ /* [...snip...] */
+
+ int txd; /* tx delay */
+ int holdd; /* duplex ptt delay */
+ int txtail; /* txtail delay */
+ int slot; /* slottime */
+ int pers; /* persistence */
+
+ /* [...snip...] */
+};
+
+/* Broken version, leaving yi.cmd uninitialized. */
+
+static int yam_ioctl(/* [...snip...] */
+ void __user *dst, struct yam_port *yp)
+{
+ struct yamdrv_ioctl_cfg yi; /* { dg-message "region created on stack here" "memspace event" } */
+ /* { dg-message "capacity: 48 bytes" "capacity event" { target *-*-* } .-1 } */
+
+ /* [...snip...] */
+
+ /* case SIOCYAMGCFG: */
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+ yi.cfg.bitrate = yp->bitrate;
+ yi.cfg.baudrate = yp->baudrate;
+ yi.cfg.mode = yp->dupmode;
+ yi.cfg.txdelay = yp->txd;
+ yi.cfg.holddly = yp->holdd;
+ yi.cfg.txtail = yp->txtail;
+ yi.cfg.persist = yp->pers;
+ yi.cfg.slottime = yp->slot;
+ if (copy_to_user(dst, &yi, sizeof(struct yamdrv_ioctl_cfg))) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "4 bytes are uninitialized" "how much note" { target *-*-* } .-1 } */
+ return -EFAULT;
+ /* [...snip...] */
+
+ return 0;
+}
+
+/* Fixed version, with a memset. */
+
+static int yam_ioctl_fixed(/* [...snip...] */
+ void __user *dst, struct yam_port *yp)
+{
+ struct yamdrv_ioctl_cfg yi;
+
+ /* [...snip...] */
+
+ /* case SIOCYAMGCFG: */
+ memset(&yi, 0, sizeof(yi));
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+ yi.cfg.bitrate = yp->bitrate;
+ yi.cfg.baudrate = yp->baudrate;
+ yi.cfg.mode = yp->dupmode;
+ yi.cfg.txdelay = yp->txd;
+ yi.cfg.holddly = yp->holdd;
+ yi.cfg.txtail = yp->txtail;
+ yi.cfg.persist = yp->pers;
+ yi.cfg.slottime = yp->slot;
+ if (copy_to_user(dst, &yi, sizeof(struct yamdrv_ioctl_cfg))) /* { dg-bogus "" } */
+ return -EFAULT;
+ /* [...snip...] */
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,101 @@
+/* "An issue was discovered in drivers/scsi/aacraid/commctrl.c in the
+ Linux kernel before 4.13. There is potential exposure of kernel stack
+ memory because aac_send_raw_srb does not initialize the reply structure."
+
+ Fixed e.g. by 342ffc26693b528648bdc9377e51e4f2450b4860 on linux-4.13.y
+ in linux-stable.
+
+ This is a very simplified version of that code (before and after the fix). */
+
+#include <string.h>
+
+typedef unsigned int __u32;
+typedef unsigned int u32;
+typedef unsigned char u8;
+
+#include "test-uaccess.h"
+
+/* Adapted from include/uapi/linux/types.h */
+
+#define __bitwise
+typedef __u32 __bitwise __le32;
+
+/* Adapted from drivers/scsi/aacraid/aacraid.h */
+
+#define AAC_SENSE_BUFFERSIZE 30
+
+struct aac_srb_reply
+{
+ __le32 status;
+ __le32 srb_status;
+ __le32 scsi_status;
+ __le32 data_xfer_length;
+ __le32 sense_data_size;
+ u8 sense_data[AAC_SENSE_BUFFERSIZE]; /* { dg-message "padding after field 'sense_data' is uninitialized \\(2 bytes\\)" } */
+};
+
+#define ST_OK 0
+#define SRB_STATUS_SUCCESS 0x01
+
+/* Adapted from drivers/scsi/aacraid/commctrl.c */
+
+static int aac_send_raw_srb(/* [...snip...] */
+ void __user *user_reply)
+{
+ u32 byte_count = 0;
+
+ /* [...snip...] */
+
+ struct aac_srb_reply reply; /* { dg-message "region created on stack here" "memspace message" } */
+ /* { dg-message "capacity: 52 bytes" "capacity message" { target *-*-* } .-1 } */
+
+ reply.status = ST_OK;
+
+ /* [...snip...] */
+
+ reply.srb_status = SRB_STATUS_SUCCESS;
+ reply.scsi_status = 0;
+ reply.data_xfer_length = byte_count;
+ reply.sense_data_size = 0;
+ memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
+
+ /* [...snip...] */
+
+ if (copy_to_user(user_reply, &reply, /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" } */
+ /* { dg-message "2 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ sizeof(struct aac_srb_reply))) {
+ /* [...snip...] */
+ }
+ /* [...snip...] */
+}
+
+static int aac_send_raw_srb_fixed(/* [...snip...] */
+ void __user *user_reply)
+{
+ u32 byte_count = 0;
+
+ /* [...snip...] */
+
+ struct aac_srb_reply reply;
+
+ /* This is the fix. */
+ memset(&reply, 0, sizeof(reply));
+
+ reply.status = ST_OK;
+
+ /* [...snip...] */
+
+ reply.srb_status = SRB_STATUS_SUCCESS;
+ reply.scsi_status = 0;
+ reply.data_xfer_length = byte_count;
+ reply.sense_data_size = 0;
+ memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
+
+ /* [...snip...] */
+
+ if (copy_to_user(user_reply, &reply, /* { dg-bogus "" } */
+ sizeof(struct aac_srb_reply))) {
+ /* [...snip...] */
+ }
+ /* [...snip...] */
+}
new file mode 100644
@@ -0,0 +1,171 @@
+/* "An issue was discovered in drivers/scsi/aacraid/commctrl.c in the
+ Linux kernel before 4.13. There is potential exposure of kernel stack
+ memory because aac_get_hba_info does not initialize the hbainfo structure."
+
+ Fixed e.g. by 342ffc26693b528648bdc9377e51e4f2450b4860 on linux-4.13.y
+ in linux-stable.
+
+ This is a simplified version of that code (before and after the fix). */
+
+#include <string.h>
+
+typedef unsigned int __u32;
+typedef unsigned int u32;
+typedef unsigned char u8;
+
+#include "test-uaccess.h"
+
+/* Adapted from include/uapi/linux/types.h */
+
+#define __bitwise
+typedef __u32 __bitwise __le32;
+
+/* Adapted from drivers/scsi/aacraid/aacraid.h */
+
+struct aac_hba_info {
+
+ u8 driver_name[50]; /* { dg-message "field 'driver_name' is uninitialized \\(50 bytes\\)" } */
+ u8 adapter_number;
+ u8 system_io_bus_number;
+ u8 device_number; /* { dg-message "padding after field 'device_number' is uninitialized \\(3 bytes\\)" } */
+ u32 function_number;
+ u32 vendor_id;
+ u32 device_id;
+ u32 sub_vendor_id;
+ u32 sub_system_id;
+ u32 mapped_base_address_size; /* { dg-message "field 'mapped_base_address_size' is uninitialized \\(4 bytes\\)" } */
+ u32 base_physical_address_high_part;
+ u32 base_physical_address_low_part;
+
+ u32 max_command_size;
+ u32 max_fib_size;
+ u32 max_scatter_gather_from_os;
+ u32 max_scatter_gather_to_fw;
+ u32 max_outstanding_fibs;
+
+ u32 queue_start_threshold;
+ u32 queue_dump_threshold;
+ u32 max_io_size_queued;
+ u32 outstanding_io;
+
+ u32 firmware_build_number;
+ u32 bios_build_number;
+ u32 driver_build_number;
+ u32 serial_number_high_part;
+ u32 serial_number_low_part;
+ u32 supported_options;
+ u32 feature_bits;
+ u32 currentnumber_ports;
+
+ u8 new_comm_interface:1; /* { dg-message "field 'new_comm_interface' is uninitialized \\(1 bit\\)" } */
+ u8 new_commands_supported:1;
+ u8 disable_passthrough:1;
+ u8 expose_non_dasd:1;
+ u8 queue_allowed:1;
+ u8 bled_check_enabled:1;
+ u8 reserved1:1;
+ u8 reserted2:1;
+
+ u32 reserved3[10]; /* { dg-message "field 'reserved3' is uninitialized \\(40 bytes\\)" } */
+
+};
+
+struct aac_dev
+{
+ /* [...snip...] */
+ int id;
+ /* [...snip...] */
+ struct pci_dev *pdev; /* Our PCI interface */
+ /* [...snip...] */
+};
+
+/* Adapted from include/linux/pci.h */
+
+struct pci_dev {
+ /* [...snip...] */
+ struct pci_bus *bus; /* bus this device is on */
+ /* [...snip...] */
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ /* [...snip...] */
+};
+
+struct pci_bus {
+ /* [...snip...] */
+ unsigned char number; /* bus number */
+ /* [...snip...] */
+};
+
+/* Adapted from drivers/scsi/aacraid/commctrl.c */
+
+static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_hba_info hbainfo; /* { dg-message "region created on stack here" "memspace message" } */
+ /* { dg-message "capacity: 200 bytes" "capacity message" { target *-*-* } .-1 } */
+
+ hbainfo.adapter_number = (u8) dev->id;
+ hbainfo.system_io_bus_number = dev->pdev->bus->number;
+ hbainfo.device_number = (dev->pdev->devfn >> 3);
+ hbainfo.function_number = (dev->pdev->devfn & 0x0007);
+
+ hbainfo.vendor_id = dev->pdev->vendor;
+ hbainfo.device_id = dev->pdev->device;
+ hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
+ hbainfo.sub_system_id = dev->pdev->subsystem_device;
+
+ if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "177 bytes are uninitialized" "how much" { target *-*-* } .-1 } */
+ /* [...snip...] */
+ }
+
+ return 0;
+}
+
+static int aac_get_hba_info_fixed(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_hba_info hbainfo;
+
+ memset(&hbainfo, 0, sizeof(hbainfo));
+ hbainfo.adapter_number = (u8) dev->id;
+ hbainfo.system_io_bus_number = dev->pdev->bus->number;
+ hbainfo.device_number = (dev->pdev->devfn >> 3);
+ hbainfo.function_number = (dev->pdev->devfn & 0x0007);
+
+ hbainfo.vendor_id = dev->pdev->vendor;
+ hbainfo.device_id = dev->pdev->device;
+ hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
+ hbainfo.sub_system_id = dev->pdev->subsystem_device;
+
+ if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { /* { dg-bogus "" } */
+ /* [...snip...] */
+ }
+
+ return 0;
+}
+
+/* An alternate fix using "= {0}" rather than memset. */
+
+static int aac_get_hba_info_fixed_alt(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_hba_info hbainfo = {0};
+
+ memset(&hbainfo, 0, sizeof(hbainfo));
+ hbainfo.adapter_number = (u8) dev->id;
+ hbainfo.system_io_bus_number = dev->pdev->bus->number;
+ hbainfo.device_number = (dev->pdev->devfn >> 3);
+ hbainfo.function_number = (dev->pdev->devfn & 0x0007);
+
+ hbainfo.vendor_id = dev->pdev->vendor;
+ hbainfo.device_id = dev->pdev->device;
+ hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
+ hbainfo.sub_system_id = dev->pdev->subsystem_device;
+
+ if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { /* { dg-bogus "" } */
+ /* [...snip...] */
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,162 @@
+/* Adapted and simplified decls from linux kernel headers. */
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+typedef __SIZE_TYPE__ size_t;
+
+#define EFAULT 14
+
+#include "test-uaccess.h"
+
+typedef unsigned int gfp_t;
+#define GFP_KERNEL 0
+
+void kfree(const void *);
+void *kmalloc(size_t size, gfp_t flags)
+ __attribute__((malloc (kfree)));
+
+/* Adapted from antipatterns.ko:infoleak.c (GPL-v2.0). */
+
+struct infoleak_buf
+{
+ char buf[256];
+};
+
+int infoleak_stack_no_init(void __user *dst)
+{
+ struct infoleak_buf st; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 256 bytes" "capacity" { target *-*-* } .-1 } */
+
+ /* No initialization of "st" at all. */
+ if (copy_to_user(dst, &st, sizeof(st))) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "256 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ return -EFAULT;
+ return 0;
+}
+
+int infoleak_heap_no_init(void __user *dst)
+{
+ struct infoleak_buf *heapbuf = kmalloc(sizeof(*heapbuf), GFP_KERNEL);
+ /* No initialization of "heapbuf" at all. */
+
+ /* TODO: we also don't check that heapbuf could be NULL when copying
+ from it. */
+ if (copy_to_user(dst, heapbuf, sizeof(*heapbuf))) /* { dg-warning "exposure" "warning" { xfail *-*-* } } */
+ /* TODO(xfail). */
+ return -EFAULT; /* { dg-warning "leak of 'heapbuf'" } */
+
+ kfree(heapbuf);
+ return 0;
+}
+
+struct infoleak_2
+{
+ u32 a;
+ u32 b; /* { dg-message "field 'b' is uninitialized \\(4 bytes\\)" } */
+};
+
+int infoleak_stack_missing_a_field(void __user *dst, u32 v)
+{
+ struct infoleak_2 st; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 8 bytes" "capacity" { target *-*-* } .-1 } */
+
+ st.a = v;
+ /* No initialization of "st.b". */
+ if (copy_to_user(dst, &st, sizeof(st))) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "4 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ return -EFAULT;
+ return 0;
+}
+
+int infoleak_heap_missing_a_field(void __user *dst, u32 v)
+{
+ struct infoleak_2 *heapbuf = kmalloc(sizeof(*heapbuf), GFP_KERNEL);
+ heapbuf->a = v; /* { dg-warning "dereference of possibly-NULL 'heapbuf'" } */
+ /* No initialization of "heapbuf->b". */
+ if (copy_to_user(dst, heapbuf, sizeof(*heapbuf))) /* { dg-warning "exposure" "warning" { xfail *-*-* } } */
+ /* TODO(xfail). */
+ {
+ kfree(heapbuf);
+ return -EFAULT;
+ }
+ kfree(heapbuf);
+ return 0;
+}
+
+struct infoleak_3
+{
+ u8 a; /* { dg-message "padding after field 'a' is uninitialized \\(3 bytes\\)" } */
+ /* padding here */
+ u32 b;
+};
+
+int infoleak_stack_padding(void __user *dst, u8 p, u32 q)
+{
+ struct infoleak_3 st; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 8 bytes" "capacity" { target *-*-* } .-1 } */
+
+ st.a = p;
+ st.b = q;
+ /* No initialization of padding. */
+ if (copy_to_user(dst, &st, sizeof(st))) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ return -EFAULT;
+ return 0;
+}
+
+int infoleak_stack_unchecked_err(void __user *dst, void __user *src)
+{
+ struct infoleak_buf st; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 256 bytes" "capacity" { target *-*-* } .-1 } */
+
+ /*
+ * If the copy_from_user call fails, then st is still uninitialized,
+   * and if the copy_to_user call succeeds, we have an infoleak.
+ */
+ int err = copy_from_user (&st, src, sizeof(st)); /* { dg-message "when 'copy_from_user' fails" } */
+ err |= copy_to_user (dst, &st, sizeof(st)); /* { dg-warning "exposure" "warning" } */
+ /* { dg-message "256 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ /* Actually, it's *up to* 256 bytes. */
+
+ if (err)
+ return -EFAULT;
+ return 0;
+}
+
+struct infoleak_4
+{
+ union {
+ u8 f1;
+ u32 f2;
+ } u;
+};
+
+int infoleak_stack_union(void __user *dst, u8 v)
+{
+ struct infoleak_4 st;
+ /*
+ * This write only initializes the u8 within the union "u",
+ * leaving the remaining 3 bytes uninitialized.
+ */
+ st.u.f1 = v;
+ if (copy_to_user(dst, &st, sizeof(st))) /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "3 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+ return -EFAULT;
+ return 0;
+}
+
+struct infoleak_5
+{
+ void *ptr;
+};
+
+int infoleak_stack_kernel_ptr(void __user *dst, void *kp)
+{
+ struct infoleak_5 st;
+ /* This writes a kernel-space pointer into a user space buffer. */
+ st.ptr = kp;
+ if (copy_to_user(dst, &st, sizeof(st))) // TODO: we don't complain about this yet
+ return -EFAULT;
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,22 @@
+#include <string.h>
+
+#include "test-uaccess.h"
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+struct st
+{
+ u8 i; /* { dg-message "padding after field 'i' is uninitialized \\(3 bytes\\)" } */
+ u32 j; /* { dg-message "field 'j' is uninitialized \\(4 bytes\\)" } */
+};
+
+void test (void __user *dst, u8 a)
+{
+ struct st s; /* { dg-message "region created on stack here" "where" } */
+ /* { dg-message "capacity: 8 bytes" "capacity" { target *-*-* } .-1 } */
+ /* { dg-message "suggest forcing zero-initialization by providing a '.0.' initializer" "fix-it hint" { target *-*-* } .-2 } */
+ s.i = a;
+ copy_to_user(dst, &s, sizeof (struct st)); /* { dg-warning "potential exposure of sensitive information by copying uninitialized data from stack" "warning" } */
+ /* { dg-message "7 bytes are uninitialized" "note how much" { target *-*-* } .-1 } */
+}
new file mode 100644
@@ -0,0 +1,78 @@
+/* Reduced from infoleak false positive seen on Linux kernel with
+ net/ethtool/ioctl.c */
+
+typedef signed char __s8;
+typedef unsigned char __u8;
+typedef unsigned int __u32;
+typedef __s8 s8;
+typedef __u32 u32;
+enum { false = 0, true = 1 };
+typedef unsigned long __kernel_ulong_t;
+typedef __kernel_ulong_t __kernel_size_t;
+typedef _Bool bool;
+typedef __kernel_size_t size_t;
+
+void *memset(void *s, int c, size_t n);
+
+extern bool
+check_copy_size(const void *addr, size_t bytes, bool is_source);
+extern unsigned long
+_copy_from_user(void *, const void *, unsigned long);
+extern unsigned long
+_copy_to_user(void *, const void *, unsigned long);
+
+static inline
+__attribute__((__always_inline__)) unsigned long
+copy_from_user(void *to, const void *from, unsigned long n) {
+ if (__builtin_expect(!!(check_copy_size(to, n, false)), 1))
+ n = _copy_from_user(to, from, n);
+ return n;
+}
+static inline
+__attribute__((__always_inline__)) unsigned long
+copy_to_user(void *to, const void *from, unsigned long n) {
+ if (__builtin_expect(!!(check_copy_size(from, n, true)), 1))
+ n = _copy_to_user(to, from, n);
+ return n;
+}
+enum ethtool_link_mode_bit_indices {
+ __ETHTOOL_LINK_MODE_MASK_NBITS = 92
+};
+struct ethtool_link_settings {
+ __u32 cmd;
+ /* [...snip...] */
+ __s8 link_mode_masks_nwords;
+ /* [...snip...] */
+};
+
+struct ethtool_link_ksettings {
+ struct ethtool_link_settings base;
+ u32 lanes;
+};
+
+int ethtool_get_link_ksettings(void *useraddr) {
+ int err = 0;
+ struct ethtool_link_ksettings link_ksettings;
+
+ if (copy_from_user(&link_ksettings.base, useraddr,
+ sizeof(link_ksettings.base)))
+ return -14;
+
+ if ((((__ETHTOOL_LINK_MODE_MASK_NBITS) + (32) - 1) / (32)) !=
+ link_ksettings.base.link_mode_masks_nwords) {
+
+ memset(&link_ksettings, 0, sizeof(link_ksettings));
+ link_ksettings.base.cmd = 0x0000004c;
+
+ link_ksettings.base.link_mode_masks_nwords =
+ -((s8)(((__ETHTOOL_LINK_MODE_MASK_NBITS) + (32) - 1) / (32)));
+
+ if (copy_to_user(useraddr, &link_ksettings.base,
+ sizeof(link_ksettings.base)))
+ return -14;
+
+ return 0;
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,39 @@
+/* Reduced from infoleak false positive in drivers/vfio/vfio_iommu_type1.c */
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+unsigned long
+copy_from_user(void *to, const void *from, unsigned long n);
+
+unsigned long
+copy_to_user(void *to, const void *from, unsigned long n);
+
+struct vfio_iommu_type1_info {
+ u32 argsz;
+ u32 flags;
+ u64 iova_pgsizes;
+ u32 cap_offset;
+ /* bytes 20-23 are padding. */
+};
+
+int vfio_iommu_type1_get_info(unsigned long arg)
+{
+ struct vfio_iommu_type1_info info;
+ unsigned long minsz = 16;
+
+ if (copy_from_user(&info, (void *)arg, 16))
+ return -14;
+
+ if (info.argsz < 16)
+ return -22;
+
+ if (info.argsz >= 20) {
+ minsz = 20;
+ info.cap_offset = 0;
+ }
+
+ /* The padding bytes (20-23) are uninitialized, but can't be written
+ back, since minsz is either 16 or 20. */
+ return copy_to_user((void *)arg, &info, minsz) ? -14 : 0; /* { dg-bogus "exposure" } */
+}