001    /*
002     *  This file is part of the Jikes RVM project (http://jikesrvm.org).
003     *
004     *  This file is licensed to You under the Eclipse Public License (EPL);
005     *  You may not use this file except in compliance with the License. You
006     *  may obtain a copy of the License at
007     *
008     *      http://www.opensource.org/licenses/eclipse-1.0.php
009     *
010     *  See the COPYRIGHT.txt file distributed with this work for information
011     *  regarding copyright ownership.
012     */
013    package org.jikesrvm.compilers.opt.mir2mc.ia32;
014    
015    import java.util.ArrayList;
016    import java.util.Enumeration;
017    
018    import static org.jikesrvm.ia32.ArchConstants.SSE2_FULL;
019    import org.jikesrvm.ArchitectureSpecificOpt.AssemblerOpt;
020    import org.jikesrvm.ArchitectureSpecific.Assembler;
021    import org.jikesrvm.VM;
022    import org.jikesrvm.Constants;
023    import org.jikesrvm.compilers.common.assembler.ForwardReference;
024    import org.jikesrvm.compilers.opt.OptimizingCompilerException;
025    import org.jikesrvm.compilers.opt.ir.MIR_BinaryAcc;
026    import org.jikesrvm.compilers.opt.ir.MIR_Branch;
027    import org.jikesrvm.compilers.opt.ir.MIR_Call;
028    import org.jikesrvm.compilers.opt.ir.MIR_Compare;
029    import org.jikesrvm.compilers.opt.ir.MIR_CondBranch;
030    import org.jikesrvm.compilers.opt.ir.MIR_Lea;
031    import org.jikesrvm.compilers.opt.ir.MIR_LowTableSwitch;
032    import org.jikesrvm.compilers.opt.ir.MIR_Move;
033    import org.jikesrvm.compilers.opt.ir.MIR_Test;
034    import org.jikesrvm.compilers.opt.ir.MIR_Unary;
035    import org.jikesrvm.compilers.opt.ir.MIR_UnaryNoRes;
036    import org.jikesrvm.compilers.opt.ir.IR;
037    import org.jikesrvm.compilers.opt.ir.Instruction;
038    import org.jikesrvm.compilers.opt.ir.Operator;
039    import org.jikesrvm.compilers.opt.ir.Operators;
040    import org.jikesrvm.compilers.opt.ir.Register;
041    import org.jikesrvm.compilers.opt.ir.ia32.PhysicalRegisterSet;
042    import org.jikesrvm.compilers.opt.ir.operand.BranchOperand;
043    import org.jikesrvm.compilers.opt.ir.operand.IntConstantOperand;
044    import org.jikesrvm.compilers.opt.ir.operand.MemoryOperand;
045    import org.jikesrvm.compilers.opt.ir.operand.Operand;
046    import org.jikesrvm.compilers.opt.ir.operand.RegisterOperand;
047    import org.jikesrvm.compilers.opt.ir.operand.TrapCodeOperand;
048    import org.jikesrvm.compilers.opt.ir.operand.ia32.IA32ConditionOperand;
049    import org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants;
050    import org.jikesrvm.ia32.TrapConstants;
051    import org.vmmagic.pragma.NoInline;
052    import org.vmmagic.unboxed.Offset;
053    
054    /**
055     *  This class provides support functionality used by the generated
056     * Assembler; it handles basic impedance-matching functionality
057     * such as determining which addressing mode is suitable for a given
058     * IA32MemoryOperand.  This class also provides some boilerplate
059     * methods that do not depend on how instructions should actually be
060     * assembled, like the top-level generateCode driver.  This class is
061     * not meant to be used in isolation, but rather to provide support
062     * from the Assembler.
063     */
064    abstract class AssemblerBase extends Assembler
065        implements Operators, Constants, PhysicalRegisterConstants {
066    
  /** Enables verbose tracing of the instruction size estimator. */
  private static final boolean DEBUG_ESTIMATE = false;

  /**
   * Hold EBP register object for use in estimating size of memory operands.
   */
  private final Register EBP;

  /**
   * Hold ESP register object for use in estimating size of memory operands.
   */
  private final Register ESP;

  /**
   * Operators with byte (__b) arguments, collected by the static initializer.
   */
  private static final Operator[] byteSizeOperators;

  /**
   * Operators with word (__w) arguments, collected by the static initializer.
   */
  private static final Operator[] wordSizeOperators;

  /**
   * Operators with quad (__q) arguments, collected by the static initializer.
   */
  private static final Operator[] quadSizeOperators;
093    
094      static {
095        ArrayList<Operator> temp = new ArrayList<Operator>();
096        for (Operator opr : Operator.OperatorArray) {
097          if (opr != null && opr.toString().indexOf("__b") != -1) {
098            temp.add(opr);
099          }
100        }
101        byteSizeOperators = temp.toArray(new Operator[temp.size()]);
102        temp.clear();
103        for (Operator opr : Operator.OperatorArray) {
104          if (opr != null && opr.toString().indexOf("__w") != -1) {
105            temp.add(opr);
106          }
107        }
108        wordSizeOperators = temp.toArray(new Operator[temp.size()]);
109        for (Operator opr : Operator.OperatorArray) {
110          if (opr != null && opr.toString().indexOf("__q") != -1) {
111            temp.add(opr);
112          }
113        }
114        quadSizeOperators = temp.toArray(new Operator[temp.size()]);
115      }
116    
117      /**
118       * Construct Assembler object
119       * @see Assembler
120       */
121      AssemblerBase(int bytecodeSize, boolean shouldPrint, IR ir) {
122        super(bytecodeSize, shouldPrint);
123        EBP = ir.regpool.getPhysicalRegisterSet().getEBP();
124        ESP = ir.regpool.getPhysicalRegisterSet().getESP();
125      }
126    
127      /**
128       * Should code created by this assembler instance be allocated in the
129       * hot code code space? The default answer for opt compiled code is yes
130       * (otherwise why are we opt compiling it?).
131       */
132      @Override
133      protected boolean isHotCode() { return true; }
134    
135      /**
136       *  Is the given operand an immediate?  In the IA32 assembly, one
137       * cannot specify floating-point constants, so the possible
138       * immediates we may see are IntegerConstants and
139       * TrapConstants (a trap constant really is an integer), and
140       * jump targets for which the exact offset is known.
141       *
142       * @see #getImm
143       *
144       * @param op the operand being queried
145       * @return true if op represents an immediate
146       */
147      boolean isImm(Operand op) {
148        return (op instanceof IntConstantOperand) ||
149               (op instanceof TrapCodeOperand) ||
150               (op instanceof BranchOperand && op.asBranch().target.getmcOffset() >= 0);
151      }
152    
153      /**
154       *  Return the IA32 ISA encoding of the immediate value
155       * represented by the the given operand.  This method assumes the
156       * operand is an immediate and will likely throw a
157       * ClassCastException if this not the case.  It treats
158       * BranchOperands somewhat differently than isImm does: in
159       * case a branch target is not resolved, it simply returns a wrong
160       * answer and trusts the caller to ignore it. This behavior
161       * simplifies life when generating code for ImmOrLabel operands.
162       *
163       * @see #isImm
164       *
165       * @param op the operand being queried
166       * @return the immediate value represented by the operand
167       */
168      int getImm(Operand op) {
169        if (op.isIntConstant()) {
170          return op.asIntConstant().value;
171        } else if (op.isBranch()) {
172          // used by ImmOrLabel stuff
173          return op.asBranch().target.getmcOffset();
174        } else {
175          return ((TrapCodeOperand) op).getTrapCode() + TrapConstants.RVM_TRAP_BASE;
176        }
177      }
178    
179      /**
180       *  Is the given operand a register operand?
181       *
182       * @see #getReg
183       *
184       * @param op the operand being queried
185       * @return true if op is an RegisterOperand
186       */
187      boolean isReg(Operand op) {
188        return op.isRegister();
189      }
190    
  /** @return true if op may be assembled as a general-purpose register */
  boolean isGPR_Reg(Operand op) {
    return isReg(op);
  }
194    
  /** @return true if op may be assembled as a floating-point register */
  boolean isFPR_Reg(Operand op) {
    return isReg(op);
  }
198    
  /** @return always false; MMX registers are not used by the opt compiler */
  boolean isMM_Reg(Operand op) {
    return false; // MM registers not currently supported in the OPT compiler
  }
202    
203      boolean isXMM_Reg(Operand op) {
204        return op.isRegister() && (op.isFloat() || op.isDouble());
205      }
206    
207      /**
208       * Return the machine-level register number corresponding to a given integer
209       * Register. The optimizing compiler has its own notion of register
210       * numbers, which is not the same as the numbers used by the IA32 ISA. This
211       * method takes an optimizing compiler register and translates it into the
212       * appropriate machine-level encoding. This method is not applied directly to
213       * operands, but rather to register objects.
214       *
215       * @see #getBase
216       * @see #getIndex
217       *
218       * @param reg the register being queried
219       * @return the 3 bit machine-level encoding of reg
220       */
221      private GPR getGPMachineRegister(Register reg) {
222        if (VM.VerifyAssertions) {
223          VM._assert(PhysicalRegisterSet.getPhysicalRegisterType(reg) == INT_REG);
224        }
225        return GPR.lookup(reg.number - FIRST_INT);
226      }
227    
228      /**
229       * Return the machine-level register number corresponding to a
230       * given Register.  The optimizing compiler has its own notion
231       * of register numbers, which is not the same as the numbers used
232       * by the IA32 ISA.  This method takes an optimizing compiler
233       * register and translates it into the appropriate machine-level
234       * encoding.  This method is not applied directly to operands, but
235       * rather to register objects.
236       *
237       * @see #getReg
238       * @see #getBase
239       * @see #getIndex
240       *
241       * @param reg the register being queried
242       * @return the 3 bit machine-level encoding of reg
243       */
244      private MachineRegister getMachineRegister(Register reg) {
245        int type = PhysicalRegisterSet.getPhysicalRegisterType(reg);
246        MachineRegister result;
247        if (type == INT_REG) {
248          result = GPR.lookup(reg.number - FIRST_INT);
249        } else {
250          if (VM.VerifyAssertions) VM._assert(type == DOUBLE_REG);
251          if (SSE2_FULL) {
252            if (reg.number < FIRST_SPECIAL) {
253              result = XMM.lookup(reg.number - FIRST_DOUBLE);
254            } else if (reg.number == ST0) {
255              result = FP0;
256            } else {
257              if (VM.VerifyAssertions) VM._assert(reg.number == ST1);
258              result = FP1;
259            }
260          } else {
261            result = FPR.lookup(reg.number - FIRST_DOUBLE);
262          }
263        }
264        return result;
265      }
266    
267      /**
268       * Given a register operand, return the 3 bit IA32 ISA encoding
269       * of that register.  This function translates an optimizing
270       * compiler register operand into the 3 bit IA32 ISA encoding that
271       * can be passed to the Assembler.  This function assumes its
272       * operand is a register operand, and will blow up if it is not;
273       * use isReg to check operands passed to this method.
274       *
275       * @see #isReg
276       *
277       * @param op the register operand being queried
278       * @return the 3 bit IA32 ISA encoding of op
279       */
280      MachineRegister getReg(Operand op) {
281        return getMachineRegister(op.asRegister().getRegister());
282      }
283    
  /** @return the machine encoding of op as a general-purpose register */
  GPR getGPR_Reg(Operand op) {
    return getGPMachineRegister(op.asRegister().getRegister());
  }
287    
  /** @return the machine encoding of op as an x87 floating-point register */
  FPR getFPR_Reg(Operand op) {
    return (FPR)getMachineRegister(op.asRegister().getRegister());
  }
291    
  /** Unsupported: the opt compiler never emits MMX register operands. */
  MM getMM_Reg(Operand op) {
    if (VM.VerifyAssertions) VM._assert(VM.NOT_REACHED, "MM registers not currently supported in the opt compiler");
    return null;
  }
296    
  /** @return the machine encoding of op as an XMM register */
  XMM getXMM_Reg(Operand op) {
    return (XMM)getMachineRegister(op.asRegister().getRegister());
  }
300    
301      /**
302       * Given a memory operand, return the 3 bit IA32 ISA encoding
303       * of its base regsiter.  This function translates the optimizing
304       * compiler register operand representing the base of the given
305       * memory operand into the 3 bit IA32 ISA encoding that
306       * can be passed to the Assembler.  This function assumes its
307       * operand is a memory operand, and will blow up if it is not;
308       * one should confirm an operand really has a base register before
309       * invoking this method on it.
310       *
311       * @see #isRegDisp
312       * @see #isRegIdx
313       * @see #isRegInd
314       *
315       * @param op the register operand being queried
316       * @return the 3 bit IA32 ISA encoding of the base register of op
317       */
318      GPR getBase(Operand op) {
319        return getGPMachineRegister(((MemoryOperand) op).base.getRegister());
320      }
321    
322      /**
323       * Given a memory operand, return the 3 bit IA32 ISA encoding
324       * of its index register.  This function translates the optimizing
325       * compiler register operand representing the index of the given
326       * memory operand into the 3 bit IA32 ISA encoding that
327       * can be passed to the Assembler.  This function assumes its
328       * operand is a memory operand, and will blow up if it is not;
329       * one should confirm an operand really has an index register before
330       * invoking this method on it.
331       *
332       * @see #isRegIdx
333       * @see #isRegOff
334       *
335       * @param op the register operand being queried
336       * @return the 3 bit IA32 ISA encoding of the index register of op
337       */
338      GPR getIndex(Operand op) {
339        return getGPMachineRegister(((MemoryOperand) op).index.getRegister());
340      }
341    
342      /**
343       *  Given a memory operand, return the 2 bit IA32 ISA encoding
344       * of its scale, suitable for passing to the Assembler to mask
345       * into a SIB byte.  This function assumes its operand is a memory
346       * operand, and will blow up if it is not; one should confirm an
347       * operand really has a scale before invoking this method on it.
348       *
349       * @see #isRegIdx
350       * @see #isRegOff
351       *
352       * @param op the register operand being queried
353       * @return the IA32 ISA encoding of the scale of op
354       */
355      short getScale(Operand op) {
356        return ((MemoryOperand) op).scale;
357      }
358    
359      /**
360       *  Given a memory operand, return the 2 bit IA32 ISA encoding
361       * of its scale, suitable for passing to the Assembler to mask
362       * into a SIB byte.  This function assumes its operand is a memory
363       * operand, and will blow up if it is not; one should confirm an
364       * operand really has a scale before invoking this method on it.
365       *
366       * @see #isRegIdx
367       * @see #isRegOff
368       *
369       * @param op the register operand being queried
370       * @return the IA32 ISA encoding of the scale of op
371       */
372      Offset getDisp(Operand op) {
373        return ((MemoryOperand) op).disp;
374      }
375    
376      /**
377       *  Determine if a given operand is a memory operand representing
378       * register-displacement mode addressing.  This method takes an
379       * arbitrary operand, checks whether it is a memory operand, and,
380       * if it is, checks whether it should be assembled as IA32
381       * register-displacement mode.  That is, does it have a non-zero
382       * displacement and a base register, but no scale and no index
383       * register?
384       *
385       * @param op the operand being queried
386       * @return true if op should be assembled as register-displacement mode
387       */
388      boolean isRegDisp(Operand op) {
389        if (op instanceof MemoryOperand) {
390          MemoryOperand mop = (MemoryOperand) op;
391          return (mop.base != null) && (mop.index == null) && (!mop.disp.isZero()) && (mop.scale == 0);
392        } else {
393          return false;
394        }
395      }
396    
397      /**
398       * Determine if a given operand is a memory operand representing
399       * absolute mode addressing.  This method takes an
400       * arbitrary operand, checks whether it is a memory operand, and,
401       * if it is, checks whether it should be assembled as IA32
402       * absolute address mode.  That is, does it have a non-zero
403       * displacement, but no scale, no scale and no index register?
404       *
405       * @param op the operand being queried
406       * @return true if op should be assembled as absolute mode
407       */
408      boolean isAbs(Operand op) {
409        if (op instanceof MemoryOperand) {
410          MemoryOperand mop = (MemoryOperand) op;
411          return (mop.base == null) && (mop.index == null) && (!mop.disp.isZero()) && (mop.scale == 0);
412        } else {
413          return false;
414        }
415      }
416    
417      /**
418       *  Determine if a given operand is a memory operand representing
419       * register-indirect mode addressing.  This method takes an
420       * arbitrary operand, checks whether it is a memory operand, and,
421       * if it is, checks whether it should be assembled as IA32
422       * register-displacement mode.  That is, does it have a base
423       * register, but no displacement, no scale and no index
424       * register?
425       *
426       * @param op the operand being queried
427       * @return true if op should be assembled as register-indirect mode
428       */
429      boolean isRegInd(Operand op) {
430        if (op instanceof MemoryOperand) {
431          MemoryOperand mop = (MemoryOperand) op;
432          return (mop.base != null) && (mop.index == null) && (mop.disp.isZero()) && (mop.scale == 0);
433        } else {
434          return false;
435        }
436      }
437    
438      /**
439       * Determine if a given operand is a memory operand representing
440       * register-offset mode addressing.  This method takes an
441       * arbitrary operand, checks whether it is a memory operand, and,
442       * if it is, checks whether it should be assembled as IA32
443       * register-offset mode.  That is, does it have a non-zero
444       * displacement, a scale parameter and an index register, but no
445       * base register?
446       *
447       * @param op the operand being queried
448       * @return true if op should be assembled as register-offset mode
449       */
450      boolean isRegOff(Operand op) {
451        if (op instanceof MemoryOperand) {
452          MemoryOperand mop = (MemoryOperand) op;
453          return (mop.base == null) && (mop.index != null);
454        } else {
455          return false;
456        }
457      }
458    
459      /**
460       *  Determine if a given operand is a memory operand representing
461       * the full glory of scaled-index-base addressing.  This method takes an
462       * arbitrary operand, checks whether it is a memory operand, and,
463       * if it is, checks whether it should be assembled as IA32
464       * SIB mode.  That is, does it have a non-zero
465       * displacement, a scale parameter, a base register and an index
466       * register?
467       *
468       * @param op the operand being queried
469       * @return true if op should be assembled as SIB mode
470       */
471      boolean isRegIdx(Operand op) {
472        if (op instanceof MemoryOperand) {
473          return !(isAbs(op) || isRegInd(op) || isRegDisp(op) || isRegOff(op));
474        } else {
475          return false;
476        }
477      }
478    
479      /**
480       *  Return the condition bits of a given optimizing compiler
481       * condition operand.  This method returns the IA32 ISA bits
482       * representing a given condition operand, suitable for passing to
483       * the Assembler to encode into the opcode of a SET, Jcc or
484       * CMOV instruction.  This being IA32, there are of course
485       * exceptions in the binary encoding of conditions (see FCMOV),
486       * but the Assembler handles that.  This function assumes its
487       * argument is an IA32ConditionOperand, and will blow up if it
488       * is not.
489       *
490       * @param op the operand being queried
491       * @return the bits that (usually) represent the given condition
492       * in the IA32 ISA */
493      byte getCond(Operand op) {
494        return ((IA32ConditionOperand) op).value;
495      }
496    
497      /**
498       *  Is the given operand an IA32 condition operand?
499       *
500       * @param op the operand being queried
501       * @return true if op is an IA32 condition operand
502       */
503      boolean isCond(Operand op) {
504        return (op instanceof IA32ConditionOperand);
505      }
506    
507      /**
508       *  Return the label representing the target of the given branch
509       * operand.  These labels are used to represent branch targets
510       * that have not yet been assembled, and so cannot be given
511       * concrete machine code offsets.  All instructions are numbered
512       * just prior to assembly, and these numbers are used as labels.
513       * This method also returns 0 (not a valid label) for int
514       * constants to simplify generation of branches (the branch
515       * generation code will ignore this invalid label; it is used to
516       * prevent type exceptions).  This method assumes its operand is a
517       * branch operand (or an int) and will blow up if it is not.
518       *
519       * @param op the branch operand being queried
520       * @return the label representing the branch target
521       */
522      int getLabel(Operand op) {
523        if (op instanceof IntConstantOperand) {
524          // used by ImmOrLabel stuff
525          return 0;
526        } else {
527          if (op.asBranch().target.getmcOffset() < 0) {
528            return -op.asBranch().target.getmcOffset();
529          } else {
530            return -1;
531          }
532        }
533      }
534    
535      /**
536       *  Is the given operand a branch target that requires a label?
537       *
538       * @see #getLabel
539       *
540       * @param op the operand being queried
541       * @return true if it represents a branch requiring a label target
542       */
543      boolean isLabel(Operand op) {
544        return (op instanceof BranchOperand && op.asBranch().target.getmcOffset() < 0);
545      }
546    
547      /**
548       *  Is the given operand a branch target?
549       *
550       * @see #getLabel
551       * @see #isLabel
552       *
553       * @param op the operand being queried
554       * @return true if it represents a branch target
555       */
556      @NoInline
557      boolean isImmOrLabel(Operand op) {
558        // TODO: Remove NoInlinePragma, work around for leave SSA bug
559        return (isImm(op) || isLabel(op));
560      }
561    
562      /**
563       * Does the given instruction operate upon byte-sized data?  The
564       * opt compiler does not represent the size of register data, so
565       * this method typically looks at the memory operand, if any, and
566       * checks whether that is a byte.  This does not work for the
567       * size-converting moves (MOVSX and MOVZX), and those instructions
568       * use the operator convention that __b on the end of the operator
569       * name means operate upon byte data.
570       *
571       * @param inst the instruction being queried
572       * @return {@code true} if inst operates upon byte data
573       */
574      boolean isByte(Instruction inst) {
575        for(Operator opr : byteSizeOperators){
576          if (opr == inst.operator) {
577            return true;
578          }
579        }
580    
581        for (int i = 0; i < inst.getNumberOfOperands(); i++) {
582          Operand op = inst.getOperand(i);
583          if (op instanceof MemoryOperand) {
584            return (((MemoryOperand) op).size == 1);
585          }
586        }
587    
588        return false;
589      }
590    
591      /**
592       * Does the given instruction operate upon word-sized data?  The
593       * opt compiler does not represent the size of register data, so
594       * this method typically looks at the memory operand, if any, and
595       * checks whether that is a word.  This does not work for the
596       * size-converting moves (MOVSX and MOVZX), and those instructions
597       * use the operator convention that __w on the end of the operator
598       * name means operate upon word data.
599       *
600       * @param inst the instruction being queried
601       * @return true if inst operates upon word data
602       */
603      boolean isWord(Instruction inst) {
604        for(Operator opr : wordSizeOperators){
605          if (opr == inst.operator) {
606            return true;
607          }
608        }
609    
610        for (int i = 0; i < inst.getNumberOfOperands(); i++) {
611          Operand op = inst.getOperand(i);
612          if (op instanceof MemoryOperand) {
613            return (((MemoryOperand) op).size == 2);
614          }
615        }
616    
617        return false;
618      }
619    
620      /**
621       *  Does the given instruction operate upon quad-sized data?  The
622       * opt compiler does not represent the size of register data, so
623       * this method typically looks at the memory operand, if any, and
624       * checks whether that is a byte.  This method also recognizes
625       * the operator convention that __q on the end of the operator
626       * name means operate upon quad data; no operator currently uses
627       * this convention.
628       *
629       * @param inst the instruction being queried
630       * @return {@code true} if inst operates upon quad data
631       */
632      boolean isQuad(Instruction inst) {
633        for(Operator opr : quadSizeOperators){
634          if (opr == inst.operator) {
635            return true;
636          }
637        }
638    
639        for (int i = 0; i < inst.getNumberOfOperands(); i++) {
640          Operand op = inst.getOperand(i);
641          if (op instanceof MemoryOperand) {
642            return (((MemoryOperand) op).size == 8);
643          }
644        }
645    
646        return false;
647      }
648    
649      /**
650       * Given a forward branch instruction and its target,
651       * determine (conservatively) if the relative offset to the
652       * target is less than 127 bytes
653       * @param start the branch instruction
654       * @param target the value of the mcOffset of the target label
655       * @return {@code true} if the relative offset will be less than 127, false otherwise
656       */
657      protected boolean targetIsClose(Instruction start, int target) {
658        Instruction inst = start.nextInstructionInCodeOrder();
659        final int budget = 120; // slight fudge factor could be 127
660        int offset = 0;
661        while (true) {
662          if (offset <= budget) return false;
663          if (inst.getmcOffset() == target) {
664            return true;
665          }
666          offset += estimateSize(inst, offset);
667          inst = inst.nextInstructionInCodeOrder();
668        }
669      }
670    
671      protected int estimateSize(Instruction inst, int offset) {
672        switch (inst.getOpcode()) {
673          case LABEL_opcode:
674            return (4 - offset) & 3; // return size of nop required for alignment
675          case BBEND_opcode:
676          case READ_CEILING_opcode:
677          case WRITE_FLOOR_opcode:
678          case UNINT_BEGIN_opcode:
679          case UNINT_END_opcode: {
680            // these generate no code
681            return 0;
682          }
683          case IA32_METHODSTART_opcode:
684            return 12;
685          // Generated from the same case in Assembler
686          case IA32_ADC_opcode:
687          case IA32_ADD_opcode:
688          case IA32_AND_opcode:
689          case IA32_OR_opcode:
690          case IA32_SBB_opcode:
691          case IA32_XOR_opcode: {
692            int size = 2; // opcode + modr/m
693            size += operandCost(MIR_BinaryAcc.getResult(inst), true);
694            size += operandCost(MIR_BinaryAcc.getValue(inst), true);
695            return size;
696          }
697          case IA32_CMP_opcode: {
698            int size = 2; // opcode + modr/m
699            size += operandCost(MIR_Compare.getVal1(inst), true);
700            size += operandCost(MIR_Compare.getVal2(inst), true);
701            return size;
702          }
703          case IA32_TEST_opcode: {
704            int size = 2; // opcode + modr/m
705            size += operandCost(MIR_Test.getVal1(inst), false);
706            size += operandCost(MIR_Test.getVal2(inst), false);
707            return size;
708          }
709          case IA32_ADDSD_opcode:
710          case IA32_SUBSD_opcode:
711          case IA32_MULSD_opcode:
712          case IA32_DIVSD_opcode:
713          case IA32_XORPD_opcode:
714          case IA32_SQRTSD_opcode:
715          case IA32_ADDSS_opcode:
716          case IA32_SUBSS_opcode:
717          case IA32_MULSS_opcode:
718          case IA32_DIVSS_opcode:
719          case IA32_XORPS_opcode: {
720            int size = 4; // opcode + modr/m
721            Operand value = MIR_BinaryAcc.getValue(inst);
722            size += operandCost(value, false);
723            return size;
724          }
725          case IA32_UCOMISS_opcode: {
726            int size = 3; // opcode + modr/m
727            Operand val2 = MIR_Compare.getVal2(inst);
728            size += operandCost(val2, false);
729            return size;
730          }
731          case IA32_UCOMISD_opcode: {
732            int size = 4; // opcode + modr/m
733            Operand val2 = MIR_Compare.getVal2(inst);
734            size += operandCost(val2, false);
735            return size;
736          }
737          case IA32_CVTSI2SS_opcode:
738          case IA32_CVTSI2SD_opcode:
739          case IA32_CVTSS2SD_opcode:
740          case IA32_CVTSD2SS_opcode:
741          case IA32_CVTSD2SI_opcode:
742          case IA32_CVTTSD2SI_opcode:
743          case IA32_CVTSS2SI_opcode:
744          case IA32_CVTTSS2SI_opcode: {
745            int size = 4; // opcode + modr/m
746            Operand result = MIR_Unary.getResult(inst);
747            Operand value = MIR_Unary.getVal(inst);
748            size += operandCost(result, false);
749            size += operandCost(value, false);
750            return size;
751          }
752          case IA32_CMPEQSD_opcode:
753          case IA32_CMPLTSD_opcode:
754          case IA32_CMPLESD_opcode:
755          case IA32_CMPUNORDSD_opcode:
756          case IA32_CMPNESD_opcode:
757          case IA32_CMPNLTSD_opcode:
758          case IA32_CMPNLESD_opcode:
759          case IA32_CMPORDSD_opcode:
760          case IA32_CMPEQSS_opcode:
761          case IA32_CMPLTSS_opcode:
762          case IA32_CMPLESS_opcode:
763          case IA32_CMPUNORDSS_opcode:
764          case IA32_CMPNESS_opcode:
765          case IA32_CMPNLTSS_opcode:
766          case IA32_CMPNLESS_opcode:
767          case IA32_CMPORDSS_opcode: {
768            int size = 5; // opcode + modr/m + type
769            Operand value = MIR_BinaryAcc.getValue(inst);
770            size += operandCost(value, false);
771            return size;
772          }
773          case IA32_MOVD_opcode:
774          case IA32_MOVLPD_opcode:
775          case IA32_MOVQ_opcode:
776          case IA32_MOVSS_opcode:
777          case IA32_MOVSD_opcode: {
778            int size = 4; // opcode + modr/m
779            Operand result = MIR_Move.getResult(inst);
780            Operand value = MIR_Move.getValue(inst);
781            size += operandCost(result, false);
782            size += operandCost(value, false);
783            return size;
784          }
785          case IA32_PUSH_opcode: {
786            Operand op = MIR_UnaryNoRes.getVal(inst);
787            int size = 0;
788            if (op instanceof RegisterOperand) {
789              size += 1;
790            } else if (op instanceof IntConstantOperand) {
791              if (fits(((IntConstantOperand) op).value, 8)) {
792                size += 2;
793              } else {
794                size += 5;
795              }
796            } else {
797              size += (2 + operandCost(op, true));
798            }
799            return size;
800          }
801          case IA32_LEA_opcode: {
802            int size = 2; // opcode + 1 byte modr/m
803            size += operandCost(MIR_Lea.getResult(inst), false);
804            size += operandCost(MIR_Lea.getValue(inst), false);
805            return size;
806          }
807          case IA32_MOV_opcode: {
808            int size = 2; // opcode + modr/m
809            Operand result = MIR_Move.getResult(inst);
810            Operand value = MIR_Move.getValue(inst);
811            size += operandCost(result, false);
812            size += operandCost(value, false);
813            return size;
814          }
815          case MIR_LOWTABLESWITCH_opcode:
816            return MIR_LowTableSwitch.getNumberOfTargets(inst)*4 + 14;
817          case IA32_OFFSET_opcode:
818            return 4;
819          case IA32_JCC_opcode:
820          case IA32_JMP_opcode:
821            return 6; // assume long form
822          case IA32_LOCK_opcode:
823            return 1;
824          case IG_PATCH_POINT_opcode:
825            return 8;
826          case IA32_INT_opcode:
827            return 2;
828          case IA32_RET_opcode:
829            return 3;
830          case IA32_CALL_opcode:
831            Operand target = MIR_Call.getTarget(inst);
832            if (isImmOrLabel(target)) {
833              return 5; // opcode + 32bit immediate
834            } else {
835              return 2 + operandCost(target, false); // opcode + modr/m
836            }
837          default: {
838            int size = 3; // 2 bytes opcode + 1 byte modr/m
839            for (Enumeration<Operand> opEnum = inst.getRootOperands(); opEnum.hasMoreElements();) {
840              Operand op = opEnum.nextElement();
841              size += operandCost(op, false);
842            }
843            return size;
844          }
845        }
846      }
847    
848      private int operandCost(Operand op, boolean shortFormImmediate) {
849        if (op instanceof MemoryOperand) {
850          MemoryOperand mop = (MemoryOperand) op;
851          // If it's a 2byte mem location, we're going to need an override prefix
852          int prefix = mop.size == 2 ? 1 : 0;
853    
854          // Deal with EBP wierdness
855          if (mop.base != null && mop.base.getRegister() == EBP) {
856            if (mop.index != null) {
857              // forced into SIB + 32 bit displacement no matter what disp is
858              return prefix + 5;
859            }
860            if (fits(mop.disp, 8)) {
861              return prefix + 1;
862            } else {
863              return prefix + 4;
864            }
865          }
866          if (mop.index != null && mop.index.getRegister() == EBP) {
867            // forced into SIB + 32 bit displacement no matter what disp is
868            return prefix + 5;
869          }
870    
871          // Deal with ESP wierdness -- requires SIB byte even when index is null
872          if (mop.base != null && mop.base.getRegister() == ESP) {
873            if (fits(mop.disp, 8)) {
874              return prefix + 2;
875            } else {
876              return prefix + 5;
877            }
878          }
879    
880          if (mop.index == null) {
881            // just displacement to worry about
882            if (mop.disp.isZero()) {
883              return prefix + 0;
884            } else if (fits(mop.disp, 8)) {
885              return prefix + 1;
886            } else {
887              return prefix + 4;
888            }
889          } else {
890            // have a SIB
891            if (mop.base == null && mop.scale != 0) {
892              // forced to 32 bit displacement even if it would fit in 8
893              return prefix + 5;
894            } else {
895              if (mop.disp.isZero()) {
896                return prefix + 1;
897              } else if (fits(mop.disp, 8)) {
898                return prefix + 2;
899              } else {
900                return prefix + 5;
901              }
902            }
903          }
904        } else if (op instanceof IntConstantOperand) {
905          if (shortFormImmediate && fits(((IntConstantOperand) op).value, 8)) {
906            return 1;
907          } else {
908            return 4;
909          }
910        } else {
911          return 0;
912        }
913      }
914    
915      /**
916       * Emit the given instruction, assuming that
917       * it is a MIR_CondBranch instruction
918       * and has a JCC operator
919       *
920       * @param inst the instruction to assemble
921       */
922      protected void doJCC(Instruction inst) {
923        byte cond = getCond(MIR_CondBranch.getCond(inst));
924        if (isImm(MIR_CondBranch.getTarget(inst))) {
925          emitJCC_Cond_Imm(cond, getImm(MIR_CondBranch.getTarget(inst)));
926        } else {
927          if (VM.VerifyAssertions && !isLabel(MIR_CondBranch.getTarget(inst))) VM._assert(VM.NOT_REACHED, inst.toString());
928          int sourceLabel = -inst.getmcOffset();
929          int targetLabel = getLabel(MIR_CondBranch.getTarget(inst));
930          int delta = targetLabel - sourceLabel;
931          if (VM.VerifyAssertions) VM._assert(delta >= 0);
932          if (delta < 10 || (delta < 90 && targetIsClose(inst, -targetLabel))) {
933            int miStart = mi;
934            ForwardReference r = new ForwardReference.ShortBranch(mi, targetLabel);
935            forwardRefs = ForwardReference.enqueue(forwardRefs, r);
936            setMachineCodes(mi++, (byte) (0x70 + cond));
937            mi += 1; // leave space for displacement
938            if (lister != null) lister.I(miStart, "J" + CONDITION[cond], 0);
939          } else {
940            emitJCC_Cond_Label(cond, targetLabel);
941          }
942        }
943      }
944    
945      /**
946       * Emit the given instruction, assuming that
947       * it is a MIR_Branch instruction
948       * and has a JMP operator
949       *
950       * @param inst the instruction to assemble
951       */
952      protected void doJMP(Instruction inst) {
953        if (isImm(MIR_Branch.getTarget(inst))) {
954          emitJMP_Imm(getImm(MIR_Branch.getTarget(inst)));
955        } else if (isLabel(MIR_Branch.getTarget(inst))) {
956          int sourceLabel = -inst.getmcOffset();
957          int targetLabel = getLabel(MIR_Branch.getTarget(inst));
958          int delta = targetLabel - sourceLabel;
959          if (VM.VerifyAssertions) VM._assert(delta >= 0);
960          if (delta < 10 || (delta < 90 && targetIsClose(inst, -targetLabel))) {
961            int miStart = mi;
962            ForwardReference r = new ForwardReference.ShortBranch(mi, targetLabel);
963            forwardRefs = ForwardReference.enqueue(forwardRefs, r);
964            setMachineCodes(mi++, (byte) 0xEB);
965            mi += 1; // leave space for displacement
966            if (lister != null) lister.I(miStart, "JMP", 0);
967          } else {
968            emitJMP_Label(getLabel(MIR_Branch.getTarget(inst)));
969          }
970        } else if (isReg(MIR_Branch.getTarget(inst))) {
971          emitJMP_Reg(getGPR_Reg(MIR_Branch.getTarget(inst)));
972        } else if (isAbs(MIR_Branch.getTarget(inst))) {
973          emitJMP_Abs(getDisp(MIR_Branch.getTarget(inst)).toWord().toAddress());
974        } else if (isRegDisp(MIR_Branch.getTarget(inst))) {
975          emitJMP_RegDisp(getBase(MIR_Branch.getTarget(inst)), getDisp(MIR_Branch.getTarget(inst)));
976        } else if (isRegOff(MIR_Branch.getTarget(inst))) {
977          emitJMP_RegOff(getIndex(MIR_Branch.getTarget(inst)),
978                         getScale(MIR_Branch.getTarget(inst)),
979                         getDisp(MIR_Branch.getTarget(inst)));
980        } else if (isRegIdx(MIR_Branch.getTarget(inst))) {
981          emitJMP_RegIdx(getBase(MIR_Branch.getTarget(inst)),
982                         getIndex(MIR_Branch.getTarget(inst)),
983                         getScale(MIR_Branch.getTarget(inst)),
984                         getDisp(MIR_Branch.getTarget(inst)));
985        } else if (isRegInd(MIR_Branch.getTarget(inst))) {
986          emitJMP_RegInd(getBase(MIR_Branch.getTarget(inst)));
987        } else {
988          if (VM.VerifyAssertions) VM._assert(VM.NOT_REACHED, inst.toString());
989        }
990      }
991    
992      /**
993       * Emit the given instruction, assuming that
994       * it is a MIR_LowTableSwitch instruction
995       * and has a MIR_LOWTABLESWITCH operator
996       *
997       * @param inst the instruction to assemble
998       */
999      protected void doLOWTABLESWITCH(Instruction inst) {
1000        int n = MIR_LowTableSwitch.getNumberOfTargets(inst); // n = number of normal cases (0..n-1)
1001        GPR ms = GPR.lookup(MIR_LowTableSwitch.getMethodStart(inst).getRegister().number);
1002        GPR idx = GPR.lookup(MIR_LowTableSwitch.getIndex(inst).getRegister().number);
1003        // idx += [ms + idx<<2 + ??] - we will patch ?? when we know the placement of the table
1004        int toPatchAddress = getMachineCodeIndex();
1005        if (VM.buildFor32Addr()) {
1006          emitMOV_Reg_RegIdx(idx, ms, idx, Assembler.WORD, Offset.fromIntZeroExtend(Integer.MAX_VALUE));
1007          emitADD_Reg_Reg(idx, ms);
1008        } else {
1009          emitMOV_Reg_RegIdx(idx, ms, idx, Assembler.WORD, Offset.fromIntZeroExtend(Integer.MAX_VALUE));
1010          emitADD_Reg_Reg_Quad(idx, ms);
1011        }
1012        // JMP T0
1013        emitJMP_Reg(idx);
1014        emitNOP((4-getMachineCodeIndex()) & 3); // align table
1015        // create table of offsets from start of method
1016        patchSwitchTableDisplacement(toPatchAddress);
1017        for (int i = 0; i < n; i++) {
1018          Operand target = MIR_LowTableSwitch.getTarget(inst, i);
1019          emitOFFSET_Imm_ImmOrLabel(i, getImm(target), getLabel(target));
1020        }
1021      }
1022    
1023      /**
1024       * Debugging support (return a printable representation of the machine code).
1025       *
1026       * @param instr  An integer to be interpreted as a PowerPC instruction
1027       * @param offset the mcoffset (in bytes) of the instruction
1028       */
1029      public String disasm(int instr, int offset) {
1030        OptimizingCompilerException.TODO("Assembler: disassembler");
1031        return null;
1032      }
1033    
1034      /**
1035       * generate machine code into ir.machinecode.
1036       * @param ir the IR to generate
1037       * @param shouldPrint should we print the machine code?
1038       * @return the number of machinecode instructions generated
1039       */
1040      public static int generateCode(IR ir, boolean shouldPrint) {
1041        int count = 0;
1042        AssemblerOpt asm = new AssemblerOpt(count, shouldPrint, ir);
1043    
1044        for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
1045          // Set the mc offset of all instructions to their negative position.
1046          // A positive value in their position means they have been created
1047          // by the assembler.
1048          count++;
1049          p.setmcOffset(-count);
1050          if (p.operator() == Operators.MIR_LOWTABLESWITCH) {
1051            // Table switch kludge, as these will occupy multiple slots in the
1052            // generated assembler
1053            count += MIR_LowTableSwitch.getNumberOfTargets(p);
1054          }
1055        }
1056    
1057        for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
1058          if (DEBUG_ESTIMATE) {
1059            int start = asm.getMachineCodeIndex();
1060            int estimate = asm.estimateSize(p, start);
1061            asm.doInst(p);
1062            int end = asm.getMachineCodeIndex();
1063            if (end - start > estimate) {
1064              VM.sysWriteln("Bad estimate: " + (end - start) + " " + estimate + " " + p);
1065              VM.sysWrite("\tMachine code: ");
1066              asm.writeLastInstruction(start);
1067              VM.sysWriteln();
1068            }
1069          } else {
1070            asm.doInst(p);
1071          }
1072        }
1073    
1074        ir.MIRInfo.machinecode = asm.getMachineCodes();
1075    
1076        return ir.MIRInfo.machinecode.length();
1077      }
1078    
1079    }