Merge "QEMU update with VENOM (CVE-2015-3456) patch" into 5.0.2
[packages/centos6/qemu.git] / 0090-tcg-sparc-Use-defines-for-temporaries.patch
From fc9726f880dea515a2cf98456c5f03a1388e4e14 Mon Sep 17 00:00:00 2001
From: Richard Henderson <rth@twiddle.net>
Date: Sun, 25 Mar 2012 22:04:59 +0200
Subject: [PATCH] tcg-sparc: Use defines for temporaries.

And change from %i4/%i5 to %g1/%o7 to remove a v8plus fixme.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
---
 tcg/sparc/tcg-target.c | 115 +++++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 56 deletions(-)

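Note: the hunks below repeatedly apply one pattern. SPARC arithmetic immediates
are signed 13-bit fields (simm13), so any constant outside [-4096, 4095] must
first be materialized in a scratch register; this patch names those scratch
registers TCG_REG_T1/TCG_REG_T2 instead of hard-coding %i4/%i5. The standalone
C sketch below illustrates just that fits-in-simm13 decision; it is not QEMU
code, and check_fit_13 is a local stand-in for QEMU's check_fit_tl(val, 13):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's check_fit_tl(val, 13): does val fit in the
   signed 13-bit immediate field (simm13) of a SPARC arithmetic insn?  */
static bool check_fit_13(int64_t val)
{
    return val >= -4096 && val <= 4095;
}

int main(void)
{
    const int64_t vals[] = { 100, 4095, 4096, -4097 };
    for (int i = 0; i < 4; i++) {
        /* Mirrors the shape of tcg_out_addi below: one immediate-form
           insn when the constant fits, otherwise build the constant in
           the reserved scratch register and use the reg-reg form.  */
        printf("%6lld -> %s\n", (long long)vals[i],
               check_fit_13(vals[i])
               ? "arithi (imm13 form)"
               : "movi into scratch T1, then arith (reg-reg form)");
    }
    return 0;
}

Compiled on its own (e.g. cc sketch.c && ./a.out), this prints which encoding
each constant would get; in the hunks below, the second branch corresponds to
the tcg_out_movi into TCG_REG_T1 followed by the register-register operation
in tcg_out_addi and tcg_out_andi.
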
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index be5c170..d401f8e 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -59,8 +59,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif
 
+/* Define some temporary registers.  T2 is used for constant generation.  */
+#define TCG_REG_T1  TCG_REG_G1
+#define TCG_REG_T2  TCG_REG_O7
+
 #ifdef CONFIG_USE_GUEST_BASE
-# define TCG_GUEST_BASE_REG TCG_REG_I3
+# define TCG_GUEST_BASE_REG TCG_REG_I5
 #else
 # define TCG_GUEST_BASE_REG TCG_REG_G0
 #endif
@@ -79,6 +83,7 @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_I2,
     TCG_REG_I3,
     TCG_REG_I4,
+    TCG_REG_I5,
 };
 
 static const int tcg_target_call_iarg_regs[6] = {
@@ -366,10 +371,10 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type,
         tcg_out_sethi(s, ret, ~arg);
         tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
     } else {
-        tcg_out_movi_imm32(s, TCG_REG_I4, arg >> (TCG_TARGET_REG_BITS / 2));
-        tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
-        tcg_out_movi_imm32(s, ret, arg);
-        tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
+        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
+        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
+        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
+        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
     }
 }
 
@@ -386,8 +391,8 @@ static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
         tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                   INSN_IMM13(offset));
     } else {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
-        tcg_out_ldst_rr(s, ret, addr, TCG_REG_I5, op);
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
+        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
     }
 }
 
@@ -428,8 +433,8 @@ static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
         if (check_fit_tl(val, 13))
             tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
         else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
-            tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
+            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
+            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
         }
     }
 }
@@ -441,8 +446,8 @@ static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
         if (check_fit_tl(val, 13))
             tcg_out_arithi(s, rd, rs, val, ARITH_AND);
         else {
-            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
-            tcg_out_arith(s, rd, rs, TCG_REG_I5, ARITH_AND);
+            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
+            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
         }
     }
 }
@@ -454,8 +459,8 @@ static void tcg_out_div32(TCGContext *s, int rd, int rs1,
     if (uns) {
         tcg_out_sety(s, TCG_REG_G0);
     } else {
-        tcg_out_arithi(s, TCG_REG_I5, rs1, 31, SHIFT_SRA);
-        tcg_out_sety(s, TCG_REG_I5);
+        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
+        tcg_out_sety(s, TCG_REG_T1);
     }
 
     tcg_out_arithc(s, rd, rs1, val2, val2const,
@@ -601,8 +606,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
     case TCG_COND_GTU:
     case TCG_COND_GEU:
         if (c2const && c2 != 0) {
-            tcg_out_movi_imm13(s, TCG_REG_I5, c2);
-            c2 = TCG_REG_I5;
+            tcg_out_movi_imm13(s, TCG_REG_T1, c2);
+            c2 = TCG_REG_T1;
         }
         t = c1, c1 = c2, c2 = t, c2const = 0;
         cond = tcg_swap_cond(cond);
@@ -649,15 +654,15 @@ static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
 
     switch (cond) {
     case TCG_COND_EQ:
-        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_I5, al, bl, blconst);
+        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_T1, al, bl, blconst);
         tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
-        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_AND);
+        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_AND);
         break;
 
     case TCG_COND_NE:
-        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_I5, al, al, blconst);
+        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_T1, al, bl, blconst);
         tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
-        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_OR);
+        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_OR);
         break;
 
     default:
@@ -935,8 +940,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
 #else
     addr_reg = args[addrlo_idx];
     if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
-        addr_reg = TCG_REG_I5;
+        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
+        addr_reg = TCG_REG_T1;
     }
     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
         int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
@@ -979,12 +984,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
                                 offsetof(CPUTLBEntry, addr_write));
 
     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
-        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
-        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
-        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
+        /* Reconstruct the full 64-bit value.  */
+        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
         tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
-        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
-        datalo = TCG_REG_G1;
+        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
+        datalo = TCG_REG_O2;
     }
 
     /* The fast path is exactly one insn.  Thus we can perform the entire
@@ -1024,16 +1028,14 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
 #else
     addr_reg = args[addrlo_idx];
     if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
-        addr_reg = TCG_REG_I5;
+        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
+        addr_reg = TCG_REG_T1;
     }
     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
-        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
-        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
-        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
+        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
         tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
-        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
-        datalo = TCG_REG_G1;
+        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
+        datalo = TCG_REG_O2;
     }
     tcg_out_ldst_rr(s, datalo, addr_reg,
                     (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
@@ -1057,28 +1059,29 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
             /* direct jump method */
-            tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
-            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+            tcg_out_sethi(s, TCG_REG_T1, args[0] & 0xffffe000);
+            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                       INSN_IMM13((args[0] & 0x1fff)));
             s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
         } else {
             /* indirect jump method */
-            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
-            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+            tcg_out_ld_ptr(s, TCG_REG_T1,
+                           (tcg_target_long)(s->tb_next + args[0]));
+            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                       INSN_RS2(TCG_REG_G0));
         }
         tcg_out_nop(s);
         s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
         break;
     case INDEX_op_call:
-        if (const_args[0])
+        if (const_args[0]) {
             tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                    - (tcg_target_ulong)s->code_ptr) >> 2)
                                  & 0x3fffffff));
-        else {
-            tcg_out_ld_ptr(s, TCG_REG_I5,
+        } else {
+            tcg_out_ld_ptr(s, TCG_REG_T1,
                            (tcg_target_long)(s->tb_next + args[0]));
-            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
+            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                       INSN_RS2(TCG_REG_G0));
         }
         /* delay slot */
@@ -1184,11 +1187,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 
     case INDEX_op_rem_i32:
     case INDEX_op_remu_i32:
-        tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2],
+        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_remu_i32);
-        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
+        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                        ARITH_UMUL);
-        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
+        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
         break;
 
     case INDEX_op_brcond_i32:
@@ -1305,11 +1308,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         goto gen_arith;
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i64:
-        tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2],
+        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                        opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
-        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
+        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                        ARITH_MULX);
-        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
+        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
         break;
     case INDEX_op_ext32s_i64:
         if (const_args[1]) {
@@ -1507,15 +1510,15 @@ static void tcg_target_init(TCGContext *s)
                      (1 << TCG_REG_O7));
 
     tcg_regset_clear(s->reserved_regs);
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
-#if TCG_TARGET_REG_BITS == 64
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
-#endif
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
+
     tcg_add_target_add_op_defs(sparc_op_defs);
 }
 
-- 
1.7.12.1