
Re: [Qemu-trivial] [PATCH v2] target-arm: Fix typos in comments


From: Peter Crosthwaite
Subject: Re: [Qemu-trivial] [PATCH v2] target-arm: Fix typos in comments
Date: Tue, 07 Aug 2012 15:36:11 +1000

On Mon, 2012-08-06 at 17:42 +0100, Peter Maydell wrote:
> Fix a variety of typos in comments in target-arm files.
> 
> Signed-off-by: Peter Maydell <address@hidden>

Reviewed-by: Peter Crosthwaite <address@hidden>

> ---
> Changes v1->v2: s/inputs values/input values/
> 
>  target-arm/arm-semi.c    |    2 +-
>  target-arm/cpu.h         |    2 +-
>  target-arm/helper.c      |    6 +++---
>  target-arm/neon_helper.c |   26 +++++++++++++-------------
>  target-arm/op_helper.c   |    2 +-
>  target-arm/translate.c   |   10 +++++-----
>  6 files changed, 24 insertions(+), 24 deletions(-)
> 
> diff --git a/target-arm/arm-semi.c b/target-arm/arm-semi.c
> index 88ca9bb..2495206 100644
> --- a/target-arm/arm-semi.c
> +++ b/target-arm/arm-semi.c
> @@ -281,7 +281,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
>              return len - ret;
>          }
>      case TARGET_SYS_READC:
> -       /* XXX: Read from debug cosole. Not implemented.  */
> +       /* XXX: Read from debug console. Not implemented.  */
>          return 0;
>      case TARGET_SYS_ISTTY:
>          if (use_gdb_syscalls()) {
> diff --git a/target-arm/cpu.h b/target-arm/cpu.h
> index 191895c..d7f93d9 100644
> --- a/target-arm/cpu.h
> +++ b/target-arm/cpu.h
> @@ -79,7 +79,7 @@ struct arm_boot_info;
>  typedef struct CPUARMState {
>      /* Regs for current mode.  */
>      uint32_t regs[16];
> -    /* Frequently accessed CPSR bits are stored separately for efficiently.
> +    /* Frequently accessed CPSR bits are stored separately for efficiency.
>         This contains all the other bits.  Use cpsr_{read,write} to access
>         the whole CPSR.  */
>      uint32_t uncached_cpsr;
> diff --git a/target-arm/helper.c b/target-arm/helper.c
> index 5727da2..dceaa95 100644
> --- a/target-arm/helper.c
> +++ b/target-arm/helper.c
> @@ -988,7 +988,7 @@ static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
>  }
>  
>  static const ARMCPRegInfo lpae_cp_reginfo[] = {
> -    /* NOP AMAIR0/1: the override is because these clash with tha rather
> +    /* NOP AMAIR0/1: the override is because these clash with the rather
>       * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
>       */
>      { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
> @@ -2899,8 +2899,8 @@ uint32_t HELPER(logicq_cc)(uint64_t val)
>      return (val >> 32) | (val != 0);
>  }
>  
> -/* VFP support.  We follow the convention used for VFP instrunctions:
> -   Single precition routines have a "s" suffix, double precision a
> +/* VFP support.  We follow the convention used for VFP instructions:
> +   Single precision routines have a "s" suffix, double precision a
>     "d" suffix.  */
>  
>  /* Convert host exception flags to vfp form.  */
> diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
> index e0b9dbf..8bb5129 100644
> --- a/target-arm/neon_helper.c
> +++ b/target-arm/neon_helper.c
> @@ -530,7 +530,7 @@ NEON_VOP(rshl_s16, neon_s16, 2)
>  #undef NEON_FN
>  
>  /* The addition of the rounding constant may overflow, so we use an
> - * intermediate 64 bits accumulator.  */
> + * intermediate 64 bit accumulator.  */
>  uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
>  {
>      int32_t dest;
> @@ -547,8 +547,8 @@ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
>      return dest;
>  }
>  
> -/* Handling addition overflow with 64 bits inputs values is more
> - * tricky than with 32 bits values.  */
> +/* Handling addition overflow with 64 bit input values is more
> + * tricky than with 32 bit values.  */
>  uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
>  {
>      int8_t shift = (int8_t)shiftop;
> @@ -590,7 +590,7 @@ NEON_VOP(rshl_u16, neon_u16, 2)
>  #undef NEON_FN
>  
>  /* The addition of the rounding constant may overflow, so we use an
> - * intermediate 64 bits accumulator.  */
> + * intermediate 64 bit accumulator.  */
>  uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
>  {
>      uint32_t dest;
> @@ -608,8 +608,8 @@ uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
>      return dest;
>  }
>  
> -/* Handling addition overflow with 64 bits inputs values is more
> - * tricky than with 32 bits values.  */
> +/* Handling addition overflow with 64 bit input values is more
> + * tricky than with 32 bit values.  */
>  uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
>  {
>      int8_t shift = (uint8_t)shiftop;
> @@ -817,7 +817,7 @@ NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
>  #undef NEON_FN
>  
>  /* The addition of the rounding constant may overflow, so we use an
> - * intermediate 64 bits accumulator.  */
> + * intermediate 64 bit accumulator.  */
>  uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
>  {
>      uint32_t dest;
> @@ -846,8 +846,8 @@ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop
>      return dest;
>  }
>  
> -/* Handling addition overflow with 64 bits inputs values is more
> - * tricky than with 32 bits values.  */
> +/* Handling addition overflow with 64 bit input values is more
> + * tricky than with 32 bit values.  */
>  uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
>  {
>      int8_t shift = (int8_t)shiftop;
> @@ -914,7 +914,7 @@ NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
>  #undef NEON_FN
>  
>  /* The addition of the rounding constant may overflow, so we use an
> - * intermediate 64 bits accumulator.  */
> + * intermediate 64 bit accumulator.  */
>  uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
>  {
>      int32_t dest;
> @@ -942,8 +942,8 @@ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shift
>      return dest;
>  }
>  
> -/* Handling addition overflow with 64 bits inputs values is more
> - * tricky than with 32 bits values.  */
> +/* Handling addition overflow with 64 bit input values is more
> + * tricky than with 32 bit values.  */
>  uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
>  {
>      int8_t shift = (uint8_t)shiftop;
> @@ -1671,7 +1671,7 @@ uint64_t HELPER(neon_negl_u64)(uint64_t x)
>      return -x;
>  }
>  
> -/* Saturnating sign manuipulation.  */
> +/* Saturating sign manipulation.  */
>  /* ??? Make these use NEON_VOP1 */
>  #define DO_QABS8(x) do { \
>      if (x == (int8_t)0x80) { \
> diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
> index 490111c..d77bfab 100644
> --- a/target-arm/op_helper.c
> +++ b/target-arm/op_helper.c
> @@ -99,7 +99,7 @@ void tlb_fill(CPUARMState *env1, target_ulong addr, int is_write, int mmu_idx,
>  }
>  #endif
>  
> -/* FIXME: Pass an axplicit pointer to QF to CPUARMState, and move saturating
> +/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
>     instructions into helper.c  */
>  uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
>  {
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 29008a4..985e007 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -53,7 +53,7 @@ typedef struct DisasContext {
>      int condjmp;
>      /* The label that will be jumped to when the instruction is skipped.  */
>      int condlabel;
> -    /* Thumb-2 condtional execution bits.  */
> +    /* Thumb-2 conditional execution bits.  */
>      int condexec_mask;
>      int condexec_cond;
>      struct TranslationBlock *tb;
> @@ -77,7 +77,7 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
>  #endif
>  
>  /* These instructions trap after executing, so defer them until after the
> -   conditional executions state has been updated.  */
> +   conditional execution state has been updated.  */
>  #define DISAS_WFI 4
>  #define DISAS_SWI 5
>  
> @@ -155,7 +155,7 @@ static void load_reg_var(DisasContext *s, TCGv var, int reg)
>  {
>      if (reg == 15) {
>          uint32_t addr;
> -        /* normaly, since we updated PC, we need only to add one insn */
> +        /* normally, since we updated PC, we need only to add one insn */
>          if (s->thumb)
>              addr = (long)s->pc + 2;
>          else
> @@ -4897,7 +4897,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
>                      size--;
>              }
>              shift = (insn >> 16) & ((1 << (3 + size)) - 1);
> -            /* To avoid excessive dumplication of ops we implement shift
> +            /* To avoid excessive duplication of ops we implement shift
>                 by immediate using the variable shift operations.  */
>              if (op < 8) {
>                  /* Shift by immediate:
> @@ -6402,7 +6402,7 @@ static void gen_logicq_cc(TCGv_i64 val)
>  
>  /* Load/Store exclusive instructions are implemented by remembering
>     the value/address loaded, and seeing if these are the same
> -   when the store is performed. This should be is sufficient to implement
> +   when the store is performed. This should be sufficient to implement
>     the architecturally mandated semantics, and avoids having to monitor
>     regular stores.
>  




