@@ -140,7 +140,7 @@ def arch_canonicalize(arch, isa_spec):
any_change = True
# Single letter extension might appear in the long_exts list,
- # becasue we just append extensions list to the arch string.
+ # because we just append extensions list to the arch string.
std_exts += list(filter(lambda x:len(x) == 1, long_exts))
def longext_sort (exts):
@@ -794,7 +794,7 @@
})
;; -------------------------------------------------------------------------
-;; Truncation to a mode whose inner mode size is an eigth of mode's.
+;; Truncation to a mode whose inner mode size is an eighth of mode's.
;; We emulate this with three consecutive vncvts.
;; -------------------------------------------------------------------------
(define_expand "trunc<mode><v_oct_trunc>2"
@@ -38,7 +38,7 @@ along with GCC; see the file COPYING3. If not see
vadd.vv (use VLMAX) --- PLUS_EXPR
vse8.v (use avl = r136) --- IFN_MASK_LEN_STORE
- NO AVL propation:
+ NO AVL propagation:
vsetvli a5, a4, ta
vle8.v v1
@@ -136,7 +136,7 @@ avl_can_be_propagated_p (rtx_insn *rinsn)
vcompress v1, v2, v0 with avl = 4, v1 = {0x8, 0x2, 0x3, 0x4}.
vcompress v1, v2, v0 with avl = 2, v1 will be unchanged.
Thus, we cannot propagate avl of vcompress because it may has
- senmatics change to the result. */
+ semantics change to the result. */
return get_attr_type (rinsn) != TYPE_VGATHER
&& get_attr_type (rinsn) != TYPE_VSLIDEDOWN
&& get_attr_type (rinsn) != TYPE_VISLIDE1DOWN
@@ -296,7 +296,7 @@ pass_avlprop::get_preferred_avl (
return NULL_RTX;
}
-/* This is a straight forward pattern ALWAYS in paritial auto-vectorization:
+/* This is a straightforward pattern ALWAYS in partial auto-vectorization:
VL = SELECT_AVL (AVL, ...)
V0 = MASK_LEN_LOAD (..., VL)
@@ -394,7 +394,7 @@ pass_avlprop::get_vlmax_ta_preferred_avl (insn_info *insn) const
if (!def1->insn ()->is_real ())
return NULL_RTX;
- /* FIXME: We only all AVL propation within a block which should
+ /* FIXME: We only allow AVL propagation within a block, which should
be totally enough for vectorized codes.
TODO: We can enhance it here for intrinsic codes in the future
@@ -39,7 +39,7 @@ ADJUST_FLOAT_FORMAT (BF, &arm_bfloat_half_format);
|BI |RVVM1BI |RVVMF2BI |RVVMF4BI |RVVMF8BI |RVVMF16BI |RVVMF32BI |RVVMF64BI | */
/* For RVV modes, each boolean value occupies 1-bit.
- 4th argument is specify the minmial possible size of the vector mode,
+ 4th argument specifies the minimal possible size of the vector mode,
and will adjust to the right size by ADJUST_BYTESIZE. */
VECTOR_BOOL_MODE (RVVM1BI, 64, BI, 8);
VECTOR_BOOL_MODE (RVVMF2BI, 32, BI, 4);
@@ -424,7 +424,7 @@ VLS_MODES (1024); /* V1024QI V512HI V256SI V128DI V512HF V256SF V128DF */
VLS_MODES (2048); /* V2048QI V1024HI V512SI V256DI V1024HF V512SF V256DF */
VLS_MODES (4096); /* V4096QI V2048HI V1024SI V512DI V2048HF V1024SF V512DF */
-/* TODO: According to RISC-V 'V' ISA spec, the maximun vector length can
+/* TODO: According to RISC-V 'V' ISA spec, the maximum vector length can
be 65536 for a single vector register which means the vector mode in
GCC can be maximum = 65536 * 8 bits (LMUL=8).
However, 'GET_MODE_SIZE' is using poly_uint16/unsigned short which will
@@ -133,9 +133,9 @@ enum rvv_vector_bits_enum {
#define TARGET_ZICOND_LIKE (TARGET_ZICOND || (TARGET_XVENTANACONDOPS && TARGET_64BIT))
-/* Bit of riscv_zvl_flags will set contintuly, N-1 bit will set if N-bit is
+/* Bits of riscv_zvl_flags are set contiguously: the N-1 bit is set if the N bit is
set, e.g. MASK_ZVL64B has set then MASK_ZVL32B is set, so we can use
- popcount to caclulate the minimal VLEN. */
+ popcount to calculate the minimal VLEN. */
#define TARGET_MIN_VLEN \
((riscv_zvl_flags == 0) \
? 0 \
@@ -147,7 +147,7 @@ enum rvv_vector_bits_enum {
? 0 \
: 32 << (__builtin_popcount (opts->x_riscv_zvl_flags) - 1))
-/* The maximmum LMUL according to user configuration. */
+/* The maximum LMUL according to user configuration. */
#define TARGET_MAX_LMUL \
(int) (rvv_max_lmul == RVV_DYNAMIC ? RVV_M8 : rvv_max_lmul)
@@ -358,7 +358,7 @@ enum insn_flags : unsigned int
/* Means using VUNDEF for merge operand. */
USE_VUNDEF_MERGE_P = 1 << 5,
- /* flags for tail policy and mask plicy operands. */
+ /* flags for tail policy and mask policy operands. */
/* Means the tail policy is TAIL_UNDISTURBED. */
TU_POLICY_P = 1 << 6,
/* Means the tail policy is default (return by get_prefer_tail_policy). */
@@ -493,7 +493,7 @@ enum insn_type : unsigned int
CPOP_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | UNARY_OP_P
| VTYPE_MODE_FROM_OP1_P,
- /* For mask instrunctions, no tail and mask policy operands. */
+ /* For mask instructions, no tail and mask policy operands. */
UNARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
| USE_VUNDEF_MERGE_P | UNARY_OP_P,
BINARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
@@ -175,8 +175,8 @@ riscv_sr_match_epilogue (void)
prologue instructions but not the epilogue then we might have the case
where the epilogue has been optimized out due to a call to a no-return
function. In this case we might be able to remove the prologue too -
- that's what this function does. PROLOGUE is the matched prolgoue
- instruction, by the time this function returns the progloue instruction
+ that's what this function does. PROLOGUE is the matched prologue
+ instruction; by the time this function returns the prologue instruction
may have been removed. */
static void
@@ -1096,7 +1096,7 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
/* If a single scalar load / store pair can do the job, leave it
to the scalar code to do that. */
/* ??? If fast unaligned access is supported, the scalar code could
- use suitably sized scalars irrespective of alignemnt. If that
+ use suitably sized scalars irrespective of alignment. If that
gets fixed, we have to adjust the test here. */
if (pow2p_hwi (length) && length <= potential_ew)
@@ -1146,7 +1146,7 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
if (riscv_vector::get_vector_mode (elem_mode,
nunits).exists (&vmode))
break;
- /* Since we don't have a mode that exactlty matches the transfer
+ /* Since we don't have a mode that exactly matches the transfer
size, we'll need to use pred_store, which is not available
for all vector modes, but only iE_RVV_M* modes, hence trying
to find a vector mode for a merely rounded-up size is
@@ -1165,7 +1165,7 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
}
/* The RVVM8?I modes are notionally 8 * BYTES_PER_RISCV_VECTOR bytes
- wide. BYTES_PER_RISCV_VECTOR can't be eavenly divided by
+ wide. BYTES_PER_RISCV_VECTOR can't be evenly divided by
the sizes of larger element types; the LMUL factor of 8 can at
the moment be divided by the SEW, with SEW of up to 8 bytes,
but there are reserved encodings so there might be larger
@@ -56,7 +56,7 @@ using namespace riscv_vector;
namespace riscv_vector {
-/* Return true if NUNTIS <=31 so that we can use immediate AVL in vsetivli. */
+/* Return true if NUNITS <=31 so that we can use immediate AVL in vsetivli. */
bool
imm_avl_p (machine_mode mode)
{
@@ -533,8 +533,8 @@ rvv_builder::is_repeating_sequence ()
for merging b we need mask 010101....
Foreach element in the npattern, we need to build a mask in scalar register.
- Mostely we need 3 instructions (aka COST = 3), which is consist of 2 scalar
- instruction and 1 scalar move to v0 register. Finally we need vector merge
+ Mostly we need 3 instructions (aka COST = 3), which consists of 2 scalar
+ instructions and 1 scalar move to v0 register. Finally we need vector merge
to merge them.
lui a5, #imm
@@ -731,7 +731,7 @@ rvv_builder::single_step_npatterns_p () const
CONST VECTOR: {-4, 4,-3, 5,-2, 6,-1, 7, ...}
VID : { 0, 1, 2, 3, 4, 5, 6, 7, ... }
DIFF(MINUS) : {-4, 3,-5,-2,-6, 1,-7, 0, ... }
- The diff sequence {-4, 3} is not repated in the npattern and
+ The diff sequence {-4, 3} is not repeated in the npattern and
return FALSE for case 2. */
bool
rvv_builder::npatterns_vid_diff_repeated_p () const
@@ -925,7 +925,7 @@ calculate_ratio (unsigned int sew, enum vlmul_type vlmul)
}
/* SCALABLE means that the vector-length is agnostic (run-time invariant and
- compile-time unknown). ZVL meands that the vector-length is specific
+ compile-time unknown). ZVL means that the vector-length is specific
(compile-time known by march like zvl*b). Both SCALABLE and ZVL are doing
auto-vectorization using VLMAX vsetvl configuration. */
static bool
@@ -1237,7 +1237,7 @@ expand_const_vector (rtx target, rtx src)
The elements within NPATTERNS are not necessary regular. */
if (builder.can_duplicate_repeating_sequence_p ())
{
- /* We handle the case that we can find a vector containter to hold
+ /* We handle the case that we can find a vector container to hold
element bitsize = NPATTERNS * ele_bitsize.
NPATTERNS = 8, element width = 8
@@ -1251,7 +1251,7 @@ expand_const_vector (rtx target, rtx src)
}
else
{
- /* We handle the case that we can't find a vector containter to hold
+ /* We handle the case that we can't find a vector container to hold
element bitsize = NPATTERNS * ele_bitsize.
NPATTERNS = 8, element width = 16
@@ -3099,7 +3099,7 @@ shuffle_merge_patterns (struct expand_vec_perm_d *d)
if (indices_fit_selector_p)
{
- /* MASK = SELECTOR < NUNTIS ? 1 : 0. */
+ /* MASK = SELECTOR < NUNITS ? 1 : 0. */
rtx sel = vec_perm_indices_to_rtx (sel_mode, d->perm);
rtx x = gen_int_mode (vec_len, GET_MODE_INNER (sel_mode));
insn_code icode = code_for_pred_cmp_scalar (sel_mode);
@@ -3258,7 +3258,7 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
int vlen = vec_len.to_constant ();
- /* It's not worthwhile the compress pattern has elemenets < 4
+ /* It's not worthwhile if the compress pattern has elements < 4
and we can't modulo indices for compress pattern. */
if (known_ge (d->perm[vlen - 1], vlen * 2) || vlen < 4)
return false;
@@ -3269,7 +3269,7 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
/* Compress point is the point that all elements value with index i >=
compress point of the selector are all consecutive series increasing and
- each selector value >= NUNTIS. In this case, we could compress all elements
+ each selector value >= NUNITS. In this case, we could compress all elements
of i < compress point into the op1. */
int compress_point = -1;
for (int i = 0; i < vlen; i++)
@@ -3322,7 +3322,7 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
TODO: This cost is not accurate, we can adjust it by tune info. */
int general_cost = 9;
- /* If we can use compress approach, the code squence will be:
+ /* If we can use compress approach, the code sequence will be:
MASK LOAD mask
COMPRESS op1, op0, mask
If it needs slide up, it will be:
@@ -3778,7 +3778,7 @@ expand_select_vl (rtx *ops)
of using vsetvli.
E.g. _255 = .SELECT_VL (3, POLY_INT_CST [4, 4]);
- We move 3 into _255 intead of using explicit vsetvl. */
+ We move 3 into _255 instead of using explicit vsetvl. */
emit_move_insn (ops[0], ops[1]);
return;
}
@@ -653,7 +653,7 @@ DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_p
DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
#undef REQUIRED_EXTENSIONS
-/* Definiation of crypto vector intrinsic functions */
+/* Definition of crypto vector intrinsic functions */
// ZVBB and ZVKB
#define REQUIRED_EXTENSIONS ZVBB_EXT
DEF_RVV_FUNCTION (vbrev, alu, full_preds, u_vv_ops)
@@ -55,7 +55,7 @@ extern const function_shape *const vlenb;
extern const function_shape *const seg_loadstore;
extern const function_shape *const seg_indexed_loadstore;
extern const function_shape *const seg_fault_load;
-/* Below function_shape are Vectro Crypto*/
+/* Below function_shape are for Vector Crypto. */
extern const function_shape *const crypto_vv;
extern const function_shape *const crypto_vi;
extern const function_shape *const crypto_vv_no_op_type;
@@ -107,7 +107,7 @@ public:
by the user when calling. */
hashval_t overloaded_hash (const vec<tree, va_gc> &);
- /* The reqired extension for the register function. */
+ /* The required extension for the register function. */
enum required_ext required;
};
@@ -3216,7 +3216,7 @@ register_builtin_types_on_null ()
#include "riscv-vector-builtins.def"
}
-/* Register vector type TYPE under its risv_vector.h name. */
+/* Register vector type TYPE under its riscv_vector.h name. */
static void
register_vector_type (vector_type_index type)
{
@@ -3772,7 +3772,7 @@ function_builder::add_function (const function_instance &instance,
unsigned int code = vec_safe_length (registered_functions);
code = (code << RISCV_BUILTIN_SHIFT) + RISCV_BUILTIN_VECTOR;
- /* We need to be able to generate placeholders to enusre that we have a
+ /* We need to be able to generate placeholders to ensure that we have a
consistent numbering scheme for function codes between the C and C++
frontends, so that everything ties up in LTO.
@@ -4803,7 +4803,7 @@ resolve_overloaded_builtin (location_t loc, unsigned int code, tree fndecl,
if (!rfun || !rfun->overloaded_p)
return NULL_TREE;
- /* According to the rvv intrinisc doc, we have no such overloaded function
+ /* According to the rvv intrinsic doc, we have no such overloaded function
with empty args. Unfortunately, we register the empty args function as
overloaded for avoiding conflict. Thus, there will actual one register
function after return NULL_TREE back to the middle-end, and finally result
@@ -119,7 +119,7 @@ DEF_RVV_TYPE (vbool2_t, 13, __rvv_bool2_t, boolean, RVVMF2BI, _b2, , )
DEF_RVV_TYPE (vbool1_t, 13, __rvv_bool1_t, boolean, RVVM1BI, _b1, , )
/* LMUL = 1/8:
- Only enble when TARGET_MIN_VLEN > 32.
+ Only enabled when TARGET_MIN_VLEN > 32.
Machine mode = RVVMF8QImode. */
DEF_RVV_TYPE (vint8mf8_t, 15, __rvv_int8mf8_t, int8, RVVMF8QI, _i8mf8, _i8,
_e8mf8)
@@ -229,7 +229,7 @@ DEF_RVV_TYPE (vuint8m8_t, 15, __rvv_uint8m8_t, uint8, RVVM8QI, _u8m8, _u8,
_e8m8)
/* LMUL = 1/4:
- Only enble when TARGET_MIN_VLEN > 32.
+ Only enabled when TARGET_MIN_VLEN > 32.
Machine mode = RVVMF4HImode. */
DEF_RVV_TYPE (vint16mf4_t, 16, __rvv_int16mf4_t, int16, RVVMF4HI, _i16mf4, _i16,
_e16mf4)
@@ -322,7 +322,7 @@ DEF_RVV_TYPE (vuint16m8_t, 16, __rvv_uint16m8_t, uint16, RVVM8HI, _u16m8, _u16,
_e16m8)
/* LMUL = 1/2:
- Only enble when TARGET_MIN_VLEN > 32.
+ Only enabled when TARGET_MIN_VLEN > 32.
Machine mode = RVVMF2SImode. */
DEF_RVV_TYPE (vint32mf2_t, 16, __rvv_int32mf2_t, int32, RVVMF2SI, _i32mf2, _i32,
_e32mf2)
@@ -539,7 +539,7 @@ DEF_RVV_TYPE (vfloat16m8_t, 17, __rvv_float16m8_t, float16, RVVM8HF, _f16m8,
/* Disable all when !TARGET_VECTOR_ELEN_FP_32. */
/* LMUL = 1/2:
- Only enble when TARGET_MIN_VLEN > 32.
+ Only enabled when TARGET_MIN_VLEN > 32.
Machine mode = RVVMF2SFmode. */
DEF_RVV_TYPE (vfloat32mf2_t, 18, __rvv_float32mf2_t, float, RVVMF2SF, _f32mf2,
_f32, _e32mf2)
@@ -655,7 +655,7 @@ DEF_RVV_BASE_TYPE (unsigned_vector, get_vector_type (type_idx))
/* According to riscv-vector-builtins-types.def, the unsigned
type is always the signed type + 1 (They have same SEW and LMUL).
For example 'vuint8mf8_t' enum = 'vint8mf8_t' enum + 1.
- Note: We dont't allow type_idx to be unsigned type. */
+ Note: We don't allow type_idx to be unsigned type. */
DEF_RVV_BASE_TYPE (unsigned_scalar, builtin_types[type_idx + 1].scalar)
DEF_RVV_BASE_TYPE (vector_ptr, builtin_types[type_idx].vector_ptr)
/* According to the latest rvv-intrinsic-doc, it defines vsm.v intrinsic:
@@ -115,8 +115,8 @@ static const unsigned int CP_WRITE_CSR = 1U << 5;
enum required_ext
{
VECTOR_EXT, /* Vector extension */
- ZVBB_EXT, /* Cryto vector Zvbb sub-ext */
- ZVBB_OR_ZVKB_EXT, /* Cryto vector Zvbb or zvkb sub-ext */
+ ZVBB_EXT, /* Crypto vector Zvbb sub-ext */
+ ZVBB_OR_ZVKB_EXT, /* Crypto vector Zvbb or zvkb sub-ext */
ZVBC_EXT, /* Crypto vector Zvbc sub-ext */
ZVKG_EXT, /* Crypto vector Zvkg sub-ext */
ZVKNED_EXT, /* Crypto vector Zvkned sub-ext */
@@ -125,7 +125,7 @@ enum required_ext
ZVKSED_EXT, /* Crypto vector Zvksed sub-ext */
ZVKSH_EXT, /* Crypto vector Zvksh sub-ext */
XTHEADVECTOR_EXT, /* XTheadVector extension */
- ZVFBFMIN_EXT, /* Zvfbfmin externsion */
+ ZVFBFMIN_EXT, /* Zvfbfmin extension */
ZVFBFWMA_EXT, /* Zvfbfwma extension */
/* Please update below to isa_name func when add or remove enum type(s). */
};
@@ -563,7 +563,7 @@ get_store_value (gimple *stmt)
return gimple_assign_rhs1 (stmt);
}
-/* Return true if addtional vector vars needed. */
+/* Return true if additional vector vars are needed. */
static bool
need_additional_vector_vars_p (stmt_vec_info stmt_info)
{
@@ -755,7 +755,7 @@ update_local_live_ranges (
convert it into:
1. MASK_LEN_GATHER_LOAD (..., perm indice).
- 2. Continguous load/store + VEC_PERM (..., perm indice)
+ 2. Contiguous load/store + VEC_PERM (..., perm indice)
We will be likely using one more vector variable. */
unsigned int max_point
@@ -890,7 +890,7 @@ costs::analyze_loop_vinfo (loop_vec_info loop_vinfo)
record_potential_unexpected_spills (loop_vinfo);
}
-/* Analyze the vectorized program stataments and use dynamic LMUL
+/* Analyze the vectorized program statements and use dynamic LMUL
heuristic to detect whether the loop has unexpected spills. */
void
costs::record_potential_unexpected_spills (loop_vec_info loop_vinfo)
@@ -1240,7 +1240,7 @@ costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
{
/* If we're applying the VLA vs. VLS unrolling heuristic,
estimate the number of statements in the unrolled VLS
- loop. For simplicitly, we assume that one iteration of the
+ loop. For simplicity, we assume that one iteration of the
VLS loop would need the same number of statements
as one iteration of the VLA loop. */
if (where == vect_body && m_unrolled_vls_niters)
@@ -1167,7 +1167,7 @@ riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
}
/* With pack we can generate a 64 bit constant with the same high
- and low 32 bits triviall. */
+ and low 32 bits trivially. */
if (cost > 3 && TARGET_64BIT && TARGET_ZBKB)
{
unsigned HOST_WIDE_INT loval = value & 0xffffffff;
@@ -1183,7 +1183,7 @@ riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
/* An arbitrary 64 bit constant can be synthesized in 5 instructions
using zbkb. We may do better than that if the upper or lower half
- can be synthsized with a single LUI, ADDI or BSET. Regardless the
+ can be synthesized with a single LUI, ADDI or BSET. Regardless, the
basic steps are the same. */
if (cost > 3 && can_create_pseudo_p ())
{
@@ -5514,7 +5514,7 @@ riscv_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, rtx, tree, int)
static bool
riscv_vector_type_p (const_tree type)
{
- /* Currently, only builtin scalabler vector type is allowed, in the future,
+ /* Currently, only builtin scalable vector type is allowed, in the future,
more vector types may be allowed, such as GNU vector type, etc. */
return riscv_vector::builtin_type_p (type);
}
@@ -140,7 +140,7 @@ th_mempair_output_move (rtx operands[4], bool load_p,
return "";
}
-/* Analyse if a pair of loads/stores MEM1 and MEM2 with given MODE
+/* Analyze if a pair of loads/stores MEM1 and MEM2 with given MODE
are consecutive so they can be merged into a mempair instruction.
RESERVED will be set to true, if a reversal of the accesses is
required (false otherwise). Returns true if the accesses can be
@@ -891,7 +891,7 @@
;; XTheadFMemIdx
;; Note, that we might get GP registers in FP-mode (reg:DF a2)
;; which cannot be handled by the XTheadFMemIdx instructions.
-;; This might even happend after register allocation.
+;; This might even happen after register allocation.
;; We could implement splitters that undo the combiner results
;; if "after_reload && !HARDFP_REG_P (operands[0])", but this
;; raises even more questions (e.g. split into what?).
@@ -27,7 +27,7 @@
UNSPEC_UNIT_STRIDED
UNSPEC_STRIDED
- ;; It's used to specify ordered/unorderd operation.
+ ;; It's used to specify ordered/unordered operation.
UNSPEC_ORDERED
UNSPEC_UNORDERED
@@ -845,7 +845,7 @@
(const_int 4)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
- ;; wheras it is pred_strided_load if operands[3] is vector mode.
+ ;; whereas it is pred_strided_load if operands[3] is vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int 5)
@@ -879,7 +879,7 @@
(symbol_ref "riscv_vector::get_ta(operands[5])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
- ;; wheras it is pred_strided_load if operands[3] is vector mode.
+ ;; whereas it is pred_strided_load if operands[3] is vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ta(operands[6])")
@@ -911,7 +911,7 @@
(symbol_ref "riscv_vector::get_ma(operands[6])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
- ;; wheras it is pred_strided_load if operands[3] is vector mode.
+ ;; whereas it is pred_strided_load if operands[3] is vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ma(operands[7])")
@@ -947,7 +947,7 @@
(const_int 5)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
- ;; wheras it is pred_strided_load if operands[3] is vector mode.
+ ;; whereas it is pred_strided_load if operands[3] is vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int 8)
@@ -1048,7 +1048,7 @@
)
;; This pattern is used to hold the AVL operand for
-;; RVV instructions that implicity use VLMAX AVL.
+;; RVV instructions that implicitly use VLMAX AVL.
;; RVV instruction implicitly use GPR that is ultimately
;; defined by this pattern is safe for VSETVL pass emit
;; a vsetvl instruction modify this register after RA.
@@ -1190,7 +1190,7 @@
;; Also applicable for all register moves.
;; Fractional vector modes load/store are not allowed to match this pattern.
;; Mask modes load/store are not allowed to match this pattern.
-;; We seperate "*mov<mode>" into "*mov<mode>_whole" and "*mov<mode>_fract" because
+;; We separate "*mov<mode>" into "*mov<mode>_whole" and "*mov<mode>_fract" because
;; we don't want to include fractional load/store in "*mov<mode>" which will
;; create unexpected patterns in LRA.
;; For example:
@@ -1206,7 +1206,7 @@
;; (set (reg:RVVMF4QI 98 v2 [orig:134 _1 ] [134])
;; (mem/c:RVVMF4QI (reg:SI 13 a3 [155]) [1 %sfp+[-2, -2] S[2, 2] A8]))
;; (clobber (reg:SI 14 a4 [149]))])
-;; So that we could be able to emit vsetvl instruction using clobber sratch a4.
+;; So that we can emit a vsetvl instruction using the clobber scratch a4.
;; To let LRA generate the expected pattern, we should exclude fractional vector
;; load/store in "*mov<mode>_whole". Otherwise, it will reload this pattern into:
;; (insn 20 19 9 2 (set (reg:RVVMF4QI 98 v2 [orig:134 _1 ] [134])
@@ -1320,7 +1320,7 @@
;; Define tuple modes data movement.
;; operands[2] is used to save the offset of each subpart.
;; operands[3] is used to calculate the address for each subpart.
-;; operands[4] is VL of vsevli instruction.
+;; operands[4] is VL of vsetvli instruction.
(define_expand "mov<mode>"
[(parallel [(set (match_operand:VT 0 "reg_or_mem_operand")
(match_operand:VT 1 "general_operand"))
@@ -1523,7 +1523,7 @@
;; - 6.1 vsetvli/vsetivl/vsetvl instructions
;; -----------------------------------------------------------------
-;; we dont't define vsetvli as unspec_volatile which has side effects.
+;; We don't define vsetvli as unspec_volatile, which has side effects.
;; This instruction can be scheduled by the instruction scheduler.
;; This means these instructions will be deleted when
;; there is no instructions using vl or vtype in the following.
@@ -1589,7 +1589,7 @@
;; vsetvli a4,zero,e32,mf2,ta,ma ;; -
;; vse32.v v24,(a1) ;; vse32.v v24,(a1)
-;; However, it may produce wrong codegen if we exclude VL/VTYPE in "vsevl<mode>".
+;; However, it may produce wrong codegen if we exclude VL/VTYPE in "vsetvl<mode>".
;; 3. void foo (int8_t *in, int8_t *out, int32_t *in2, int32_t *out2, int M)
;; {
;; for (int i = 0; i < M; i++){
@@ -1687,7 +1687,7 @@
(set (attr "ma") (symbol_ref "INTVAL (operands[4])"))])
;; It's emit by vsetvl/vsetvlmax intrinsics with no side effects.
-;; Since we have many optmization passes from "expand" to "reload_completed",
+;; Since we have many optimization passes from "expand" to "reload_completed",
;; such pattern can allow us gain benefits of these optimizations.
(define_insn_and_split "@vsetvl<mode>_no_side_effects"
[(set (match_operand:P 0 "register_operand" "=r")
@@ -2644,7 +2644,7 @@
;; vx instructions patterns.
;; Note: Unlike vv patterns, we should split them since they are variant.
-;; For vsll.vx/vsra.vx/vsrl.vx the scalar mode should be Pmode wheras the
+;; For vsll.vx/vsra.vx/vsrl.vx the scalar mode should be Pmode whereas the
;; scalar mode is inner mode of the RVV mode for other vx patterns.
(define_insn "@pred_<optab><mode>_scalar"
[(set (match_operand:V_VLSI 0 "register_operand" "=vd,vd, vr, vr,vd,vd, vr, vr")
@@ -4198,7 +4198,7 @@
;; -------------------------------------------------------------------------------
;; Includes:
;; - 12.1 Vector Single-Width Saturating Add and Subtract
-;; - 12.2 Vector Single-Width Aaveraging Add and Subtract
+;; - 12.2 Vector Single-Width Averaging Add and Subtract
;; - 12.3 Vector Single-Width Fractional Multiply with Rounding and Saturation
;; - 12.4 Vector Single-Width Scaling Shift Instructions
;; - 12.5 Vector Narrowing Fixed-Point Clip Instructions
@@ -4627,7 +4627,7 @@
;; ---- Predicated integer comparison operations
;; -------------------------------------------------------------------------------
;; Includes:
-;; - 11.8 Vector Integer Comparision Instructions
+;; - 11.8 Vector Integer Comparison Instructions
;; -------------------------------------------------------------------------------
(define_expand "@pred_cmp<mode>"
@@ -5925,7 +5925,7 @@
;; -------------------------------------------------------------------------------
;; We keep this pattern same as pred_mov so that we can gain more optimizations.
-;; For example, if we have vmxor.mm v1,v1,v1. It will be optmized as vmclr.m which
+;; For example, if we have vmxor.mm v1,v1,v1. It will be optimized as vmclr.m which
;; is generated by pred_mov.
(define_insn "@pred_<optab><mode>"
[(set (match_operand:VB_VLS 0 "register_operand" "=vr")
@@ -1,4 +1,4 @@
-;; Machine description for RISC-V Zc extention.
+;; Machine description for RISC-V Zc extension.
;; Copyright (C) 2023-2024 Free Software Foundation, Inc.
;; Contributed by Fei Gao (gaofei@eswincomputing.com).