Assembler.h   Assembler.h 
skipping to change at line 137 skipping to change at line 137
void validate(); void validate();
bool isValidEntry(uint32_t idx, LIns* ins) const; /* return true iff idx and ins are matched */ bool isValidEntry(uint32_t idx, LIns* ins) const; /* return true iff idx and ins are matched */
void checkForResourceConsistency(const RegAlloc& regs); void checkForResourceConsistency(const RegAlloc& regs);
void checkForResourceLeaks() const; void checkForResourceLeaks() const;
#endif #endif
class Iter class Iter
{ {
private: private:
const AR& _ar; const AR& _ar;
// '_i' points to the start of the entries for an LIns, or to the first NULL entry.
uint32_t _i; uint32_t _i;
public: public:
inline Iter(const AR& ar) : _ar(ar), _i(1) { } inline Iter(const AR& ar) : _ar(ar), _i(1) { }
bool next(LIns*& ins, uint32_t& nStackSlots, int32_t& offset); // get the next one (moves iterator forward) bool next(LIns*& ins, uint32_t& nStackSlots, int32_t& offset); // get the next one (moves iterator forward)
}; };
}; };
inline AR::AR() inline AR::AR()
{ {
_entries[0] = NULL; _entries[0] = NULL;
skipping to change at line 297 skipping to change at line 298
#endif // NJ_VERBOSE #endif // NJ_VERBOSE
public: public:
#ifdef VTUNE #ifdef VTUNE
avmplus::CodegenLIR *cgen; avmplus::CodegenLIR *cgen;
#endif #endif
Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config); Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config);
void compile(Fragment *frag, Allocator& alloc, bool optimize void compile(Fragment *frag, Allocator& alloc, bool optimize
verbose_only(, LabelMap*)); verbose_only(, LInsPrinter*));
void endAssembly(Fragment* frag); void endAssembly(Fragment* frag);
void assemble(Fragment* frag, LirFilter* reader); void assemble(Fragment* frag, LirFilter* reader);
void beginAssembly(Fragment *frag); void beginAssembly(Fragment *frag);
void releaseRegisters(); void releaseRegisters();
void patch(GuardRecord *lr); void patch(GuardRecord *lr);
void patch(SideExit *exit); void patch(SideExit *exit);
#ifdef NANOJIT_IA32 #ifdef NANOJIT_IA32
void patch(SideExit *exit, SwitchInfo* si); void patch(SideExit *exit, SwitchInfo* si);
skipping to change at line 338 skipping to change at line 339
uint32_t arReserve(LIns* ins); uint32_t arReserve(LIns* ins);
void arFree(LIns* ins); void arFree(LIns* ins);
void arReset(); void arReset();
Register registerAlloc(LIns* ins, RegisterMask allow, RegisterMask prefer); Register registerAlloc(LIns* ins, RegisterMask allow, RegisterMask prefer);
Register registerAllocTmp(RegisterMask allow); Register registerAllocTmp(RegisterMask allow);
void registerResetAll(); void registerResetAll();
void evictAllActiveRegs(); void evictAllActiveRegs();
void evictSomeActiveRegs(RegisterMask regs); void evictSomeActiveRegs(RegisterMask regs);
void evictScratchRegs(); void evictScratchRegsExcept(RegisterMask ignore);
void intersectRegisterState(RegAlloc& saved); void intersectRegisterState(RegAlloc& saved);
void unionRegisterState(RegAlloc& saved); void unionRegisterState(RegAlloc& saved);
void assignSaved(RegAlloc &saved, RegisterMask skip); void assignSaved(RegAlloc &saved, RegisterMask skip);
LInsp findVictim(RegisterMask allow); LInsp findVictim(RegisterMask allow);
Register getBaseReg(LIns *i, int &d, RegisterMask allow); Register getBaseReg(LIns *ins, int &d, RegisterMask allow);
void getBaseReg2(RegisterMask allowValue, LIns* value, R egister& rv, void getBaseReg2(RegisterMask allowValue, LIns* value, R egister& rv,
RegisterMask allowBase, LIns* base, Reg ister& rb, int &d); RegisterMask allowBase, LIns* base, Reg ister& rb, int &d);
#if NJ_USES_QUAD_CONSTANTS #if NJ_USES_QUAD_CONSTANTS
const uint64_t* const uint64_t*
findQuadConstant(uint64_t q); findQuadConstant(uint64_t q);
#endif #endif
int findMemFor(LIns* i); int findMemFor(LIns* ins);
Register findRegFor(LIns* i, RegisterMask allow); Register findRegFor(LIns* ins, RegisterMask allow);
void findRegFor2(RegisterMask allowa, LIns* ia, Register &ra, void findRegFor2(RegisterMask allowa, LIns* ia, Register &ra,
RegisterMask allowb, LIns *ib, Register &rb); RegisterMask allowb, LIns *ib, Register &rb);
Register findSpecificRegFor(LIns* i, Register r); Register findSpecificRegFor(LIns* ins, Register r);
Register findSpecificRegForUnallocated(LIns* i, Register r); Register findSpecificRegForUnallocated(LIns* ins, Register r);
Register deprecated_prepResultReg(LIns *i, RegisterMask allow); Register deprecated_prepResultReg(LIns *ins, RegisterMask allow);
Register prepareResultReg(LIns *i, RegisterMask allow); Register prepareResultReg(LIns *ins, RegisterMask allow);
void deprecated_freeRsrcOf(LIns *i, bool pop); void deprecated_freeRsrcOf(LIns *ins);
void freeResourcesOf(LIns *ins); void freeResourcesOf(LIns *ins);
void evictIfActive(Register r); void evictIfActive(Register r);
void evict(LIns* vic); void evict(LIns* vic);
RegisterMask hint(LIns* ins); // mask==0 means there's no pre ferred register(s) RegisterMask hint(LIns* ins); // mask==0 means there's no pre ferred register(s)
void codeAlloc(NIns *&start, NIns *&end, NIns *&eip void codeAlloc(NIns *&start, NIns *&end, NIns *&eip
verbose_only(, size_t &nBytes)); verbose_only(, size_t &nBytes));
bool canRemat(LIns*); bool canRemat(LIns*);
bool isKnownReg(Register r) { bool deprecated_isKnownReg(Register r) {
return r != deprecated_UnknownReg; return r != deprecated_UnknownReg;
} }
Allocator& alloc; // for items with same lifetime as this Assembler Allocator& alloc; // for items with same lifetime as this Assembler
CodeAlloc& _codeAlloc; // for code we generate CodeAlloc& _codeAlloc; // for code we generate
Allocator& _dataAlloc; // for data used by gen erated code Allocator& _dataAlloc; // for data used by gen erated code
Fragment* _thisfrag; Fragment* _thisfrag;
RegAllocMap _branchStateMap; RegAllocMap _branchStateMap;
NInsMap _patches; NInsMap _patches;
LabelStateMap _labels; LabelStateMap _labels;
skipping to change at line 413 skipping to change at line 414
#define SWAP(t, a, b) do { t tmp = a; a = b; b = tmp; } while (0) #define SWAP(t, a, b) do { t tmp = a; a = b; b = tmp; } while (0)
void swapCodeChunks(); void swapCodeChunks();
NIns* _epilogue; NIns* _epilogue;
AssmError _err; // 0 = means assemble() appears ok, otherwise it failed AssmError _err; // 0 = means assemble() appears ok, otherwise it failed
#if PEDANTIC #if PEDANTIC
NIns* pedanticTop; NIns* pedanticTop;
#endif #endif
// Instruction lookahead in gen(). lookahead[0] is the current
// instruction. Nb: lookahead[1..N_LOOKAHEAD] may include dead
// instructions, but we won't know that they're dead yet.
static const int N_LOOKAHEAD = 3;
LInsp lookahead[N_LOOKAHEAD];
AR _activation; AR _activation;
RegAlloc _allocator; RegAlloc _allocator;
verbose_only( void asm_inc_m32(uint32_t*); ) verbose_only( void asm_inc_m32(uint32_t*); )
void asm_mmq(Register rd, int dd, Register rs, int ds); void asm_mmq(Register rd, int dd, Register rs, int ds);
NIns* asm_exit(LInsp guard); NIns* asm_exit(LInsp guard);
NIns* asm_leave_trace(LInsp guard); NIns* asm_leave_trace(LInsp guard);
void asm_store32(LOpcode op, LIns *val, int d, LIns *base); void asm_store32(LOpcode op, LIns *val, int d, LIns *base);
void asm_store64(LOpcode op, LIns *val, int d, LIns *base); void asm_store64(LOpcode op, LIns *val, int d, LIns *base);
void asm_restore(LInsp, Register); void asm_restore(LInsp, Register);
void asm_spilli(LInsp i, bool pop); void asm_maybe_spill(LInsp ins, bool pop);
void asm_spill(Register rr, int d, bool pop, bool quad); void asm_spill(Register rr, int d, bool pop, bool quad);
void asm_load64(LInsp i); void asm_load64(LInsp ins);
void asm_ret(LInsp p); void asm_ret(LInsp ins);
void asm_quad(LInsp i); #ifdef NANOJIT_64BIT
void asm_fcond(LInsp i); void asm_immq(LInsp ins);
void asm_cond(LInsp i); #endif
void asm_arith(LInsp i); void asm_immf(LInsp ins);
void asm_neg_not(LInsp i); void asm_fcond(LInsp ins);
void asm_load32(LInsp i); void asm_cond(LInsp ins);
void asm_cmov(LInsp i); void asm_arith(LInsp ins);
void asm_param(LInsp i); void asm_neg_not(LInsp ins);
void asm_int(LInsp i); void asm_load32(LInsp ins);
void asm_cmov(LInsp ins);
void asm_param(LInsp ins);
void asm_immi(LInsp ins);
#if NJ_SOFTFLOAT_SUPPORTED #if NJ_SOFTFLOAT_SUPPORTED
void asm_qlo(LInsp i); void asm_qlo(LInsp ins);
void asm_qhi(LInsp i); void asm_qhi(LInsp ins);
void asm_qjoin(LIns *ins); void asm_qjoin(LIns *ins);
#endif #endif
void asm_fneg(LInsp ins); void asm_fneg(LInsp ins);
void asm_fop(LInsp ins); void asm_fop(LInsp ins);
void asm_i2f(LInsp ins); void asm_i2f(LInsp ins);
void asm_u2f(LInsp ins); void asm_u2f(LInsp ins);
void asm_f2i(LInsp ins); void asm_f2i(LInsp ins);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
void asm_q2i(LInsp ins); void asm_q2i(LInsp ins);
void asm_promote(LIns *ins); void asm_promote(LIns *ins);
 End of changes. 11 change blocks. 26 lines changed or deleted, 37 lines changed or added.


 LIR.h   LIR.h 
skipping to change at line 51 skipping to change at line 51
#define __nanojit_LIR__ #define __nanojit_LIR__
namespace nanojit namespace nanojit
{ {
enum LOpcode enum LOpcode
#if defined(_MSC_VER) && _MSC_VER >= 1400 #if defined(_MSC_VER) && _MSC_VER >= 1400
#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum #pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum
: unsigned : unsigned
#endif #endif
{ {
#define OP___(op, number, repKind, retType) \ #define OP___(op, number, repKind, retType, isCse) \
LIR_##op = (number), LIR_##op = (number),
#include "LIRopcode.tbl" #include "LIRopcode.tbl"
LIR_sentinel, LIR_sentinel,
#undef OP___ #undef OP___
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
# define PTR_SIZE(a,b) b # define PTR_SIZE(a,b) b
#else #else
# define PTR_SIZE(a,b) a # define PTR_SIZE(a,b) a
#endif #endif
// pointer op aliases // Pointer-sized synonyms.
LIR_ldp = PTR_SIZE(LIR_ld, LIR_ldq),
LIR_ldcp = PTR_SIZE(LIR_ldc, LIR_ldqc), LIR_paramp = PTR_SIZE(LIR_paraml, LIR_paramq),
LIR_allocp = PTR_SIZE(LIR_allocl, LIR_allocq),
LIR_retp = PTR_SIZE(LIR_retl, LIR_retq),
LIR_livep = PTR_SIZE(LIR_livel, LIR_liveq),
LIR_ldp = PTR_SIZE(LIR_ldl, LIR_ldq),
LIR_stp = PTR_SIZE(LIR_stl, LIR_stq),
LIR_callp = PTR_SIZE(LIR_calll, LIR_callq),
LIR_eqp = PTR_SIZE(LIR_eql, LIR_eqq),
LIR_ltp = PTR_SIZE(LIR_ltl, LIR_ltq),
LIR_gtp = PTR_SIZE(LIR_gtl, LIR_gtq),
LIR_lep = PTR_SIZE(LIR_lel, LIR_leq),
LIR_gep = PTR_SIZE(LIR_gel, LIR_geq),
LIR_ltup = PTR_SIZE(LIR_ltul, LIR_ltuq),
LIR_gtup = PTR_SIZE(LIR_gtul, LIR_gtuq),
LIR_leup = PTR_SIZE(LIR_leul, LIR_leuq),
LIR_geup = PTR_SIZE(LIR_geul, LIR_geuq),
LIR_addp = PTR_SIZE(LIR_addl, LIR_addq),
LIR_andp = PTR_SIZE(LIR_andl, LIR_andq),
LIR_orp = PTR_SIZE(LIR_orl, LIR_orq),
LIR_xorp = PTR_SIZE(LIR_xorl, LIR_xorq),
LIR_lshp = PTR_SIZE(LIR_lshl, LIR_lshq),
LIR_rshp = PTR_SIZE(LIR_rshl, LIR_rshq),
LIR_rshup = PTR_SIZE(LIR_rshul, LIR_rshuq),
LIR_cmovp = PTR_SIZE(LIR_cmovl, LIR_cmovq),
// XXX: temporary synonyms for old opcode names and old pointer-sized
// synonyms, for the Great Opcode Renaming transition period (bug
// 504506).  Those in comments have not changed and so don't need a
// temporary synonym.
// LIR_start
// LIR_regfence
// LIR_skip
#ifndef NANOJIT_64BIT
LIR_iparam = LIR_paraml,
#else
LIR_qparam = LIR_paramq,
#endif
#ifndef NANOJIT_64BIT
LIR_ialloc = LIR_allocl,
#else
LIR_qalloc = LIR_allocq,
#endif
LIR_ret = LIR_retl,
#ifdef NANOJIT_64BIT
LIR_qret = LIR_retq,
#endif
LIR_fret = LIR_retd,
LIR_live = LIR_livel,
#ifdef NANOJIT_64BIT
LIR_qlive = LIR_liveq,
#endif
LIR_flive = LIR_lived,
// file
// line
LIR_ldsb = LIR_ldb2l,
LIR_ldss = LIR_ldw2l,
LIR_ldzb = LIR_ldub2ul,
LIR_ldzs = LIR_lduw2ul,
LIR_ld = LIR_ldl,
// LIR_ldq
LIR_ldf = LIR_ldd,
LIR_ld32f = LIR_lds2d,
// LIR_stb
LIR_sts = LIR_stw,
LIR_sti = LIR_stl,
#ifdef NANOJIT_64BIT
LIR_stqi = LIR_stq,
#endif
LIR_stfi = LIR_std,
LIR_st32f = LIR_std2s,
LIR_icall = LIR_calll,
#ifdef NANOJIT_64BIT
LIR_qcall = LIR_callq,
#endif
LIR_fcall = LIR_calld,
// LIR_j
// LIR_jt
// LIR_jf
// LIR_jtbl
// LIR_label = LIR_label
// LIR_x
// LIR_xt
// LIR_xf
// LIR_xtbl
// LIR_xbarrier
LIR_int = LIR_imml,
#ifdef NANOJIT_64BIT
LIR_quad = LIR_immq,
#endif
LIR_float = LIR_immd,
LIR_eq = LIR_eql,
LIR_lt = LIR_ltl,
LIR_gt = LIR_gtl,
LIR_le = LIR_lel,
LIR_ge = LIR_gel,
LIR_ult = LIR_ltul,
LIR_ugt = LIR_gtul,
LIR_ule = LIR_leul,
LIR_uge = LIR_geul,
#ifdef NANOJIT_64BIT
LIR_qeq = LIR_eqq,
LIR_qlt = LIR_ltq,
LIR_qgt = LIR_gtq,
LIR_qle = LIR_leq,
LIR_qge = LIR_geq,
LIR_qult = LIR_ltuq,
LIR_qugt = LIR_gtuq,
LIR_qule = LIR_leuq,
LIR_quge = LIR_geuq,
#endif
LIR_feq = LIR_eqd,
LIR_flt = LIR_ltd,
LIR_fgt = LIR_gtd,
LIR_fle = LIR_led,
LIR_fge = LIR_ged,
LIR_neg = LIR_negl,
LIR_add = LIR_addl,
LIR_sub = LIR_subl,
LIR_mul = LIR_mull,
#if defined NANOJIT_IA32 || defined NANOJIT_X64
LIR_div = LIR_divl,
LIR_mod = LIR_modl,
#endif
LIR_not = LIR_notl,
LIR_and = LIR_andl,
LIR_or = LIR_orl,
LIR_xor = LIR_xorl,
LIR_lsh = LIR_lshl,
LIR_rsh = LIR_rshl,
LIR_ush = LIR_rshul,
#ifdef NANOJIT_64BIT
LIR_qiadd = LIR_addq,
LIR_qiand = LIR_andq,
LIR_qior = LIR_orq,
LIR_qxor = LIR_xorq,
LIR_qilsh = LIR_lshq,
LIR_qirsh = LIR_rshq,
LIR_qursh = LIR_rshuq,
#endif
LIR_fneg = LIR_negd,
LIR_fadd = LIR_addd,
LIR_fsub = LIR_subd,
LIR_fmul = LIR_muld,
LIR_fdiv = LIR_divd,
LIR_fmod = LIR_modd,
LIR_cmov = LIR_cmovl,
#ifdef NANOJIT_64BIT
LIR_qcmov = LIR_cmovq,
#endif
#ifdef NANOJIT_64BIT
LIR_i2q = LIR_l2q,
LIR_u2q = LIR_ul2uq,
LIR_q2i = LIR_q2l,
#endif
LIR_i2f = LIR_l2d,
LIR_u2f = LIR_ul2d,
LIR_f2i = LIR_d2l,
LIR_addxov = LIR_addxovl,
LIR_subxov = LIR_subxovl,
LIR_mulxov = LIR_mulxovl,
#if NJ_SOFTFLOAT_SUPPORTED
LIR_qlo = LIR_dlo2l,
LIR_qhi = LIR_dhi2l,
LIR_qjoin = LIR_ll2d,
LIR_callh = LIR_hcalll,
#endif
LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam),
LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc),
LIR_pret = PTR_SIZE(LIR_ret, LIR_qret),
LIR_plive = PTR_SIZE(LIR_live, LIR_qlive),
LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi), LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi),
LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
LIR_piand = PTR_SIZE(LIR_and, LIR_qiand), LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh),
LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh),
LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh),
LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov),
LIR_pior = PTR_SIZE(LIR_or, LIR_qior),
LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor),
LIR_addp = PTR_SIZE(LIR_iaddp, LIR_qaddp),
LIR_peq = PTR_SIZE(LIR_eq, LIR_qeq), LIR_peq = PTR_SIZE(LIR_eq, LIR_qeq),
LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt), LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt),
LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt), LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt),
LIR_ple = PTR_SIZE(LIR_le, LIR_qle), LIR_ple = PTR_SIZE(LIR_le, LIR_qle),
LIR_pge = PTR_SIZE(LIR_ge, LIR_qge), LIR_pge = PTR_SIZE(LIR_ge, LIR_qge),
LIR_pult = PTR_SIZE(LIR_ult, LIR_qult), LIR_pult = PTR_SIZE(LIR_ult, LIR_qult),
LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt), LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt),
LIR_pule = PTR_SIZE(LIR_ule, LIR_qule), LIR_pule = PTR_SIZE(LIR_ule, LIR_qule),
LIR_puge = PTR_SIZE(LIR_uge, LIR_quge), LIR_puge = PTR_SIZE(LIR_uge, LIR_quge),
LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc), LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam), LIR_piand = PTR_SIZE(LIR_and, LIR_qiand),
LIR_plive = PTR_SIZE(LIR_live, LIR_qlive), LIR_pior = PTR_SIZE(LIR_or, LIR_qior),
LIR_pret = PTR_SIZE(LIR_ret, LIR_qret) LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor),
LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh),
LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh),
LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh),
LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov)
}; };
// 32-bit integer comparisons must be contiguous, as must 64-bit integer
// comparisons and 64-bit float comparisons.
NanoStaticAssert(LIR_eq + 1 == LIR_lt &&
LIR_eq + 2 == LIR_gt &&
LIR_eq + 3 == LIR_le &&
LIR_eq + 4 == LIR_ge &&
LIR_eq + 5 == LIR_ult &&
LIR_eq + 6 == LIR_ugt &&
LIR_eq + 7 == LIR_ule &&
LIR_eq + 8 == LIR_uge);
#ifdef NANOJIT_64BIT
NanoStaticAssert(LIR_qeq + 1 == LIR_qlt &&
LIR_qeq + 2 == LIR_qgt &&
LIR_qeq + 3 == LIR_qle &&
LIR_qeq + 4 == LIR_qge &&
LIR_qeq + 5 == LIR_qult &&
LIR_qeq + 6 == LIR_qugt &&
LIR_qeq + 7 == LIR_qule &&
LIR_qeq + 8 == LIR_quge);
#endif
NanoStaticAssert(LIR_feq + 1 == LIR_flt &&
LIR_feq + 2 == LIR_fgt &&
LIR_feq + 3 == LIR_fle &&
LIR_feq + 4 == LIR_fge);
// Various opcodes must be changeable to their opposite with op^1
// (although we use invertXyz() when possible, ie. outside static
// assertions).
NanoStaticAssert((LIR_jt^1) == LIR_jf && (LIR_jf^1) == LIR_jt);
NanoStaticAssert((LIR_xt^1) == LIR_xf && (LIR_xf^1) == LIR_xt);
NanoStaticAssert((LIR_lt^1) == LIR_gt && (LIR_gt^1) == LIR_lt);
NanoStaticAssert((LIR_le^1) == LIR_ge && (LIR_ge^1) == LIR_le);
NanoStaticAssert((LIR_ult^1) == LIR_ugt && (LIR_ugt^1) == LIR_ult);
NanoStaticAssert((LIR_ule^1) == LIR_uge && (LIR_uge^1) == LIR_ule);
#ifdef NANOJIT_64BIT
NanoStaticAssert((LIR_qlt^1) == LIR_qgt && (LIR_qgt^1) == LIR_qlt);
NanoStaticAssert((LIR_qle^1) == LIR_qge && (LIR_qge^1) == LIR_qle);
NanoStaticAssert((LIR_qult^1) == LIR_qugt && (LIR_qugt^1) == LIR_qult);
NanoStaticAssert((LIR_qule^1) == LIR_quge && (LIR_quge^1) == LIR_qule);
#endif
NanoStaticAssert((LIR_flt^1) == LIR_fgt && (LIR_fgt^1) == LIR_flt);
NanoStaticAssert((LIR_fle^1) == LIR_fge && (LIR_fge^1) == LIR_fle);
struct GuardRecord; struct GuardRecord;
struct SideExit; struct SideExit;
enum AbiKind { enum AbiKind {
ABI_FASTCALL, ABI_FASTCALL,
ABI_THISCALL, ABI_THISCALL,
ABI_STDCALL, ABI_STDCALL,
ABI_CDECL ABI_CDECL
}; };
enum ArgSize {
    ARGSIZE_NONE = 0,
    ARGSIZE_F = 1,      // double (64bit)
    ARGSIZE_I = 2,      // int32_t
#ifdef NANOJIT_64BIT
    ARGSIZE_Q = 3,      // uint64_t
#endif
    ARGSIZE_U = 6,      // uint32_t
    ARGSIZE_MASK_ANY = 7,
    ARGSIZE_MASK_INT = 2,
    ARGSIZE_SHIFT = 3,
    // aliases
    ARGSIZE_P = PTR_SIZE(ARGSIZE_I, ARGSIZE_Q), // pointer
    ARGSIZE_LO = ARGSIZE_I,     // int32_t
    ARGSIZE_B = ARGSIZE_I,      // bool
    ARGSIZE_V = ARGSIZE_NONE    // void
};
// All values must fit into three bits.  See CallInfo for details.
enum ArgType {
    ARGTYPE_V = 0,      // void
    ARGTYPE_F = 1,      // double (64bit)
    ARGTYPE_I = 2,      // int32_t
    ARGTYPE_U = 3,      // uint32_t
#ifdef NANOJIT_64BIT
    ARGTYPE_Q = 4,      // uint64_t
#endif
    // aliases
    ARGTYPE_P = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q), // pointer
    ARGTYPE_LO = ARGTYPE_I,     // int32_t
    ARGTYPE_B = ARGTYPE_I       // bool
};
// In _typesig, each entry is three bits.
static const int ARGTYPE_SHIFT = 3;
static const int ARGTYPE_MASK = 0x7;
enum IndirectCall { enum IndirectCall {
CALL_INDIRECT = 0 CALL_INDIRECT = 0
}; };
//-----------------------------------------------------------------------
// Aliasing
// --------
// *Aliasing* occurs when a single memory location can be accessed through
// multiple names.  For example, consider this code:
//
//   ld a[0]
//   sti b[0]
//   ld a[0]
//
// In general, it's possible that a[0] and b[0] may refer to the same
// memory location.  This means, for example, that you cannot safely
// perform CSE on the two loads.  However, if you know that 'a' cannot be
// an alias of 'b' (ie. the two loads do not alias with the store) then
// you can safely perform CSE.
//
// Access regions
// --------------
// Doing alias analysis precisely is difficult.  But it turns out that
// keeping track of aliasing at a very coarse level is enough to help with
// many optimisations.  So we conceptually divide the memory that is
// accessible from LIR into a small number of "access regions".  An access
// region may be non-contiguous.  No two access regions can overlap.  The
// union of all access regions covers all memory accessible from LIR.
//
// In general a (static) load or store may be executed more than once, and
// thus may access multiple regions;  however, in practice almost all
// loads and stores will obviously access only a single region.  A
// function called from LIR may load and/or store multiple access regions
// (even if executed only once).
//
// If two loads/stores/calls are known to not access the same region(s),
// then they do not alias.
//
// The access regions used are as follows:
//
// - READONLY: all memory that is read-only, ie. never stored to.
//   A load from a READONLY region will never alias with any stores.
//
// - STACK: the stack.  Stack loads/stores can usually be easily
//   identified because they use SP as the base pointer.
//
// - RSTACK: the return stack.  Return stack loads/stores can usually be
//   easily identified because they use RP as the base pointer.
//
// - OTHER: all other regions of memory.
//
// It makes sense to add new access regions when doing so will help with
// one or more optimisations.
//
// One subtlety is that the meanings of the access region markings only
// apply to the LIR fragment that they are in.  For example, if a memory
// location M is read-only in a particular LIR fragment, all loads
// involving M in that fragment can be safely marked READONLY, even if M
// is modified elsewhere.  This is safe because a LIR fragment is the
// unit of analysis in which the markings are used.  In other words alias
// region markings are only used for intra-fragment optimisations.
//
// Access region sets and instruction markings
// -------------------------------------------
// The LIR generator must mark each load/store with an "access region
// set", which is a set of one or more access regions.  This indicates
// which parts of LIR-accessible memory the load/store may touch.
//
// The LIR generator must also mark each function called from LIR with an
// access region set for memory stored to by the function.  (We could also
// have a marking for memory loads, but there's no need at the moment.)
// These markings apply to the function itself, not the call site (ie.
// they're not context-sensitive).
//
// These load/store/call markings MUST BE ACCURATE -- if they are wrong
// then invalid optimisations might occur that change the meaning of the
// code.  However, they can safely be imprecise (ie. conservative), in the
// following ways:
//
// - A load that accesses a READONLY region can be safely marked instead
//   as loading from OTHER.  In other words, it's safe to underestimate
//   the size of the READONLY region.  (This would also apply to the load
//   set of a function, if we recorded that.)
//
// - A load/store can safely be marked as accessing regions that it
//   doesn't, so long as the regions it does access are also included (one
//   exception: marking a store with READONLY is nonsense and will cause
//   assertions).
//
//   In other words, a load/store can be marked with an access region set
//   that is a superset of its actual access region set.  Taking this to
//   its logical conclusion, any load can be safely marked with LOAD_ANY and
//   any store can be safely marked with STORE_ANY (and the latter is
//   true for the store set of a function.)
//
// Such imprecision is safe but may reduce optimisation opportunities.
//
// Optimisations that use access region info
// -----------------------------------------
// Currently only CseFilter uses this, and only for determining whether
// loads can be CSE'd.  Note that CseFilter treats loads that are marked
// with a single access region precisely, but all loads marked with
// multiple access regions get lumped together.  So if you can't mark a
// load with a single access region, you might as well use ACC_LOAD_ANY.
//-----------------------------------------------------------------------

// An access region set is represented as a bitset.  Nb: this restricts us
// to at most eight alias regions for the moment.
typedef uint8_t AccSet;

// The access regions.  Note that because of the bitset representation
// these constants are also valid (singleton) AccSet values.  If you add
// new ones please update ACC_ALL_STORABLE and formatAccSet() and
// CseFilter.
//
static const AccSet ACC_READONLY = 1 << 0;      // 0000_0001b
static const AccSet ACC_STACK    = 1 << 1;      // 0000_0010b
static const AccSet ACC_RSTACK   = 1 << 2;      // 0000_0100b
static const AccSet ACC_OTHER    = 1 << 3;      // 0000_1000b

// Some common (non-singleton) access region sets.  ACC_NONE does not make
// sense for loads or stores (which must access at least one region), it
// only makes sense for calls.
//
// A convention that's worth using:  use ACC_LOAD_ANY/ACC_STORE_ANY for
// cases that you're unsure about or haven't considered carefully.  Use
// ACC_ALL/ACC_ALL_STORABLE for cases that you have considered carefully.
// That way it's easy to tell which ones have been considered and which
// haven't.
static const AccSet ACC_NONE         = 0x0;
static const AccSet ACC_ALL_STORABLE = ACC_STACK | ACC_RSTACK | ACC_OTHER;
static const AccSet ACC_ALL          = ACC_READONLY | ACC_ALL_STORABLE;
static const AccSet ACC_LOAD_ANY     = ACC_ALL;            // synonym
static const AccSet ACC_STORE_ANY    = ACC_ALL_STORABLE;   // synonym
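A minimal sketch of how these sets are meant to be combined and queried (hypothetical usage; 'lir' stands for whatever LirWriter sits at the front of the pipeline, and the insLoad()/insStore() overloads that take an AccSet are declared further down in this header):

    // Hypothetical illustration only -- the region choices depend on the front end.
    AccSet spillSet   = ACC_STACK;        // a store to a stack slot
    AccSet unknownSet = ACC_LOAD_ANY;     // a load we haven't analysed carefully

    // A READONLY load can never alias any store, because no store may be
    // marked READONLY:
    bool mayAlias = (ACC_READONLY & ACC_ALL_STORABLE) != 0;   // always false

    // When emitting LIR, the set travels with the displacement:
    //   lir->insStore(LIR_sti, value, sp, 8, ACC_STACK);
    //   lir->insLoad(LIR_ld, obj, 4, ACC_OTHER);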
struct CallInfo struct CallInfo
{ {
    private:
    public:
    uintptr_t _address; uintptr_t _address;
    uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret uint32_t _typesig:27; // 9 3-bit fields indicating arg type, by ARGTYPE above (including ret type): a1 a2 a3 a4 a5 ret
    uint8_t _cse:1; // true if no side effects
    uint8_t _fold:1; // true if no side effects
    AbiKind _abi:3; AbiKind _abi:3;
    uint8_t _isPure:1; // _isPure=1 means no side-effects, result only depends on args
    AccSet _storeAccSet; // access regions stored by the function
    verbose_only ( const char* _name; ) verbose_only ( const char* _name; )
    uint32_t _count_args(uint32_t mask) const; uint32_t count_args() const;
    uint32_t count_int32_args() const;
    // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg. // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
    uint32_t get_sizes(ArgSize* sizes) const; uint32_t getArgTypes(ArgType* types) const;
    inline ArgSize returnType() const { inline ArgType returnType() const {
        return ArgSize(_argtypes & ARGSIZE_MASK_ANY); return ArgType(_typesig & ARGTYPE_MASK);
    } }
    // Note that this indexes arguments *backwards*, that is to
    // get the Nth arg, you have to ask for index (numargs - N).
    // See mozilla bug 525815 for fixing this.
    inline ArgSize argType(uint32_t arg) const {
        return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
    }
    inline bool isIndirect() const { inline bool isIndirect() const {
        return _address < 256; return _address < 256;
    } }
    inline uint32_t count_args() const {
        return _count_args(ARGSIZE_MASK_ANY);
    }
    inline uint32_t count_iargs() const {
        return _count_args(ARGSIZE_MASK_INT);
    }
    // fargs = args - iargs
}; };
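A rough, hedged sketch of how the packed _typesig field can be pulled apart, consistent with returnType() above and the right-to-left convention noted for getArgTypes(); the real count_args()/getArgTypes() live in LIR.cpp and may differ in detail:

    // Sketch only: the lowest 3-bit field is the return type, and each further
    // 3-bit field holds one argument type, rightmost argument first.
    inline uint32_t decodeTypesig(uint32_t typesig, ArgType* argTypes, ArgType* retType)
    {
        *retType = ArgType(typesig & ARGTYPE_MASK);
        typesig >>= ARGTYPE_SHIFT;
        uint32_t nArgs = 0;
        while (typesig != 0) {              // argument fields are never ARGTYPE_V (0)
            argTypes[nArgs++] = ArgType(typesig & ARGTYPE_MASK);
            typesig >>= ARGTYPE_SHIFT;
        }
        return nArgs;
    }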
/* /*
* Record for extra data used to compile switches as jump tables. * Record for extra data used to compile switches as jump tables.
*/ */
struct SwitchInfo struct SwitchInfo
{ {
NIns** table; // Jump table; a jump address is NIns* NIns** table; // Jump table; a jump address is NIns*
uint32_t count; // Number of table entries uint32_t count; // Number of table entries
// Index value at last execution of the switch. The index value // Index value at last execution of the switch. The index value
// is the offset into the jump table. Thus it is computed as // is the offset into the jump table. Thus it is computed as
// (switch expression) - (lowest case value). // (switch expression) - (lowest case value).
uint32_t index; uint32_t index;
}; };
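For illustration only (nothing here is lifted from the real back ends): the relationship the comment above describes between the switch operand, the lowest case value, and the jump table is simply:

    // index = (switch expression) - (lowest case value)     -- hypothetical locals
    uint32_t idx = uint32_t(switchValue - lowestCaseValue);
    // ...and the branch target is then si->table[idx], with si->index recording
    // the value used on the most recent execution of the switch.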
// Array holding the 'isCse' field from LIRopcode.tbl.
extern const int8_t isCses[];       // cannot be uint8_t, some values are negative
inline bool isCseOpcode(LOpcode op) {
    return
#if defined NANOJIT_64BIT
        (op >= LIR_quad && op <= LIR_quge) ||
#else
        (op >= LIR_i2f && op <= LIR_float) ||   // XXX: yuk; use a table (bug 542932)
#endif
        (op >= LIR_int && op <= LIR_uge);
}
inline bool isCseOpcode(LOpcode op) {
    NanoAssert(isCses[op] != -1);   // see LIRopcode.tbl to understand this
    return isCses[op] == 1;
}
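A sketch of how such a table can be generated straight from LIRopcode.tbl now that OP___ carries an isCse column (the actual definition lives in LIR.cpp and may differ, e.g. in how LIR_sentinel is handled); repKinds[] and retTypes[] are built the same way:

    const int8_t isCses[] = {
    #define OP___(op, number, repKind, retType, isCse) \
        isCse,
    #include "LIRopcode.tbl"
    #undef OP___
        0   // assumed filler entry for LIR_sentinel
    };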
inline bool isRetOpcode(LOpcode op) { inline bool isRetOpcode(LOpcode op) {
return return
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
op == LIR_qret || op == LIR_qret ||
#endif #endif
op == LIR_ret || op == LIR_fret; op == LIR_ret || op == LIR_fret;
} }
inline bool isCmovOpcode(LOpcode op) { inline bool isCmovOpcode(LOpcode op) {
return return
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
op == LIR_qcmov || op == LIR_qcmov ||
#endif #endif
op == LIR_cmov; op == LIR_cmov;
} }
inline bool isICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_uge;
}
inline bool isSICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_ge;
}
inline bool isUICmpOpcode(LOpcode op) {
return LIR_eq == op || (LIR_ult <= op && op <= LIR_uge);
}
#ifdef NANOJIT_64BIT
inline bool isQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_quge;
}
inline bool isSQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_qge;
}
inline bool isUQCmpOpcode(LOpcode op) {
return LIR_qeq == op || (LIR_qult <= op && op <= LIR_quge);
}
#endif
inline bool isFCmpOpcode(LOpcode op) {
return LIR_feq <= op && op <= LIR_fge;
}
inline LOpcode invertCondJmpOpcode(LOpcode op) {
NanoAssert(op == LIR_jt || op == LIR_jf);
return LOpcode(op ^ 1);
}
inline LOpcode invertCondGuardOpcode(LOpcode op) {
NanoAssert(op == LIR_xt || op == LIR_xf);
return LOpcode(op ^ 1);
}
inline LOpcode invertICmpOpcode(LOpcode op) {
NanoAssert(isICmpOpcode(op));
return LOpcode(op ^ 1);
}
#ifdef NANOJIT_64BIT
inline LOpcode invertQCmpOpcode(LOpcode op) {
NanoAssert(isQCmpOpcode(op));
return LOpcode(op ^ 1);
}
#endif
inline LOpcode invertFCmpOpcode(LOpcode op) {
NanoAssert(isFCmpOpcode(op));
return LOpcode(op ^ 1);
}
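The payoff of the op^1 pairing asserted above is that switching between the paired opcodes is a single bit flip, for example:

    LOpcode a = invertCondJmpOpcode(LIR_jt);   // LIR_jf: branch sense negated
    LOpcode b = invertICmpOpcode(LIR_lt);      // LIR_gt: the paired comparison
    LOpcode c = invertFCmpOpcode(LIR_fle);     // LIR_fge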
inline LOpcode getCallOpcode(const CallInfo* ci) { inline LOpcode getCallOpcode(const CallInfo* ci) {
LOpcode op = LIR_pcall; LOpcode op = LIR_pcall;
switch (ci->returnType()) { switch (ci->returnType()) {
case ARGSIZE_NONE: op = LIR_pcall; break; case ARGTYPE_V: op = LIR_pcall; break;
case ARGSIZE_I: case ARGTYPE_I:
case ARGSIZE_U: op = LIR_icall; break; case ARGTYPE_U: op = LIR_icall; break;
case ARGSIZE_F: op = LIR_fcall; break; case ARGTYPE_F: op = LIR_fcall; break;
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
case ARGSIZE_Q: op = LIR_qcall; break; case ARGTYPE_Q: op = LIR_qcall; break;
#endif #endif
default: NanoAssert(0); break; default: NanoAssert(0); break;
} }
return op; return op;
} }
LOpcode f64arith_to_i32arith(LOpcode op); LOpcode f64arith_to_i32arith(LOpcode op);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LOpcode i32cmp_to_i64cmp(LOpcode op); LOpcode i32cmp_to_i64cmp(LOpcode op);
#endif #endif
LOpcode f64cmp_to_i32cmp(LOpcode op);
LOpcode f64cmp_to_u32cmp(LOpcode op);
// Array holding the 'repKind' field from LIRopcode.tbl. // Array holding the 'repKind' field from LIRopcode.tbl.
extern const uint8_t repKinds[]; extern const uint8_t repKinds[];
enum LTy { enum LTy {
LTy_Void, // no value/no type LTy_Void, // no value/no type
LTy_I32, // 32-bit integer LTy_I32, // 32-bit integer
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LTy_I64, // 64-bit integer LTy_I64, // 64-bit integer
#endif #endif
skipping to change at line 389 skipping to change at line 820
inline LInsJtbl*toLInsJtbl()const; inline LInsJtbl*toLInsJtbl()const;
void staticSanityCheck(); void staticSanityCheck();
public: public:
// LIns initializers. // LIns initializers.
inline void initLInsOp0(LOpcode opcode); inline void initLInsOp0(LOpcode opcode);
inline void initLInsOp1(LOpcode opcode, LIns* oprnd1); inline void initLInsOp1(LOpcode opcode, LIns* oprnd1);
inline void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) ; inline void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) ;
inline void initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3); inline void initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3);
inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d); inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet);
inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d); inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet);
inline void initLInsSk(LIns* prevLIns); inline void initLInsSk(LIns* prevLIns);
// Nb: args[] must be allocated and initialised before being passed in; // Nb: args[] must be allocated and initialised before being passed in;
// initLInsC() just copies the pointer into the LInsC. // initLInsC() just copies the pointer into the LInsC.
inline void initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci); inline void initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci);
inline void initLInsP(int32_t arg, int32_t kind); inline void initLInsP(int32_t arg, int32_t kind);
inline void initLInsI(LOpcode opcode, int32_t imm32); inline void initLInsI(LOpcode opcode, int32_t imm32);
inline void initLInsN64(LOpcode opcode, int64_t imm64); inline void initLInsN64(LOpcode opcode, int64_t imm64);
inline void initLInsJtbl(LIns* index, uint32_t size, LIns** table); inline void initLInsJtbl(LIns* index, uint32_t size, LIns** table);
LOpcode opcode() const { return lastWord.opcode; } LOpcode opcode() const { return lastWord.opcode; }
skipping to change at line 473 skipping to change at line 904
inline LIns* oprnd2() const; inline LIns* oprnd2() const;
inline LIns* oprnd3() const; inline LIns* oprnd3() const;
// For branches. // For branches.
inline LIns* getTarget() const; inline LIns* getTarget() const;
inline void setTarget(LIns* label); inline void setTarget(LIns* label);
// For guards. // For guards.
inline GuardRecord* record() const; inline GuardRecord* record() const;
// Displacement for LInsLd/LInsSti // For loads/stores.
inline int32_t disp() const; inline int32_t disp() const;
inline AccSet accSet() const;
// For LInsSk. // For LInsSk.
inline LIns* prevLIns() const; inline LIns* prevLIns() const;
// For LInsP. // For LInsP.
inline uint8_t paramArg() const; inline uint8_t paramArg() const;
inline uint8_t paramKind() const; inline uint8_t paramKind() const;
// For LInsI. // For LInsI.
inline int32_t imm32() const; inline int32_t imm32() const;
skipping to change at line 563 skipping to change at line 995
bool isLInsN64() const { bool isLInsN64() const {
NanoAssert(LRK_None != repKinds[opcode()]); NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_N64 == repKinds[opcode()]; return LRK_N64 == repKinds[opcode()];
} }
bool isLInsJtbl() const { bool isLInsJtbl() const {
NanoAssert(LRK_None != repKinds[opcode()]); NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Jtbl == repKinds[opcode()]; return LRK_Jtbl == repKinds[opcode()];
} }
// LIns predicates. // LIns predicates.
bool isCse() const { bool isop(LOpcode o) const {
return isCseOpcode(opcode()) || (isCall() && callInfo()->_cse); return opcode() == o;
} }
bool isRet() const { bool isRet() const {
return isRetOpcode(opcode()); return isRetOpcode(opcode());
} }
bool isLive() const { bool isLive() const {
LOpcode op = opcode(); return isop(LIR_live) ||
return
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
op == LIR_qlive || isop(LIR_qlive) ||
#endif #endif
op == LIR_live || op == LIR_flive; isop(LIR_flive);
}
bool isop(LOpcode o) const {
return opcode() == o;
} }
bool isCmp() const { bool isCmp() const {
LOpcode op = opcode(); LOpcode op = opcode();
return (op >= LIR_eq && op <= LIR_uge) || return isICmpOpcode(op) ||
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
(op >= LIR_qeq && op <= LIR_quge) || isQCmpOpcode(op) ||
#endif #endif
(op >= LIR_feq && op <= LIR_fge); isFCmpOpcode(op);
} }
bool isCall() const { bool isCall() const {
return return isop(LIR_icall) ||
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
isop(LIR_qcall) || isop(LIR_qcall) ||
#endif #endif
isop(LIR_icall) || isop(LIR_fcall); isop(LIR_fcall);
} }
bool isCmov() const { bool isCmov() const {
return isCmovOpcode(opcode()); return isCmovOpcode(opcode());
} }
bool isStore() const { bool isStore() const {
return isLInsSti(); return isLInsSti();
} }
bool isLoad() const { bool isLoad() const {
return isLInsLd(); return isLInsLd();
} }
bool isGuard() const { bool isGuard() const {
return isop(LIR_x) || isop(LIR_xf) || isop(LIR_xt) || return isop(LIR_x) || isop(LIR_xf) || isop(LIR_xt) ||
isop(LIR_xbarrier) || isop(LIR_xtbl) || isop(LIR_xbarrier) || isop(LIR_xtbl) ||
isop(LIR_addxov) || isop(LIR_subxov) || isop(LIR_mulxov) ; isop(LIR_addxov) || isop(LIR_subxov) || isop(LIR_mulxov) ;
} }
// True if the instruction is a 32-bit or smaller constant integer. // True if the instruction is a 32-bit integer immediate.
bool isconst() const { bool isconst() const {
return isop(LIR_int); return isop(LIR_int);
} }
// True if the instruction is a 32-bit or smaller constant integer and // True if the instruction is a 32-bit integer immediate and
// has the value val when treated as a 32-bit signed integer. // has the value 'val' when treated as a 32-bit signed integer.
bool isconstval(int32_t val) const { bool isconstval(int32_t val) const {
return isconst() && imm32()==val; return isconst() && imm32()==val;
} }
// True if the instruction is a constant quad value.
bool isconstq() const {
    return
#ifdef NANOJIT_64BIT
        isop(LIR_quad) ||
#endif
        isop(LIR_float);
}
#ifdef NANOJIT_64BIT
// True if the instruction is a 64-bit integer immediate.
bool isconstq() const {
    return isop(LIR_quad);
}
#endif
// True if the instruction is a constant pointer value. // True if the instruction is a pointer-sized integer immediate.
bool isconstp() const bool isconstp() const
{ {
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
return isconstq(); return isconstq();
#else #else
return isconst(); return isconst();
#endif #endif
} }
// True if the instruction is a constant float value. // True if the instruction is a 64-bit float immediate.
bool isconstf() const { bool isconstf() const {
return isop(LIR_float); return isop(LIR_float);
} }
// True if the instruction is a 64-bit integer or float immediate.
bool isconstqf() const {
return
#ifdef NANOJIT_64BIT
isconstq() ||
#endif
isconstf();
}
// True if the instruction is any type of immediate.
bool isImmAny() const {
return isconst() || isconstqf();
}
bool isBranch() const { bool isBranch() const {
return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_ jtbl); return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_ jtbl);
} }
LTy retType() const { LTy retType() const {
return retTypes[opcode()]; return retTypes[opcode()];
} }
bool isVoid() const { bool isVoid() const {
return retType() == LTy_Void; return retType() == LTy_Void;
skipping to change at line 683 skipping to change at line 1121
#endif #endif
} }
// Return true if removal of 'ins' from a LIR fragment could // Return true if removal of 'ins' from a LIR fragment could
// possibly change the behaviour of that fragment, even if any // possibly change the behaviour of that fragment, even if any
// value computed by 'ins' is not used later in the fragment. // value computed by 'ins' is not used later in the fragment.
// In other words, can 'ins' possibly alter control flow or memory? // In other words, can 'ins' possibly alter control flow or memory?
// Note, this assumes that loads will never fault and hence cannot // Note, this assumes that loads will never fault and hence cannot
// affect the control flow. // affect the control flow.
bool isStmt() { bool isStmt() {
NanoAssert(!isop(LIR_start) && !isop(LIR_skip)); NanoAssert(!isop(LIR_skip));
// All instructions with Void retType are statements.  And some // All instructions with Void retType are statements, as are calls
// calls are statements too. // to impure functions.
if (isCall()) if (isCall())
return !isCse(); return !callInfo()->_isPure;
else else
return isVoid(); return isVoid();
} }
inline void* constvalp() const inline void* constvalp() const
{ {
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
return (void*)imm64(); return (void*)imm64();
#else #else
return (void*)imm32(); return (void*)imm32();
skipping to change at line 773 skipping to change at line 1211
public: public:
LIns* getLIns() { return &ins; }; LIns* getLIns() { return &ins; };
}; };
// Used for all loads. // Used for all loads.
class LInsLd class LInsLd
{ {
private: private:
friend class LIns; friend class LIns;
int32_t disp; // Nb: the LIR writer pipeline handles things if a displacement
// exceeds 16 bits.  This is rare, but does happen occasionally.  We
// could go to 24 bits but then it would happen so rarely that the
// handler code would be difficult to test and thus untrustworthy.
int16_t disp;
AccSet accSet;
AccSet accSet;
LIns* oprnd_1; LIns* oprnd_1;
LIns ins; LIns ins;
public: public:
LIns* getLIns() { return &ins; }; LIns* getLIns() { return &ins; };
}; };
// Used for LIR_sti and LIR_stqi. // Used for LIR_sti and LIR_stqi.
class LInsSti class LInsSti
{ {
private: private:
friend class LIns; friend class LIns;
int32_t disp; int16_t disp;
AccSet accSet;
LIns* oprnd_2; LIns* oprnd_2;
LIns* oprnd_1; LIns* oprnd_1;
LIns ins; LIns ins;
public: public:
LIns* getLIns() { return &ins; }; LIns* getLIns() { return &ins; };
}; };
skipping to change at line 946 skipping to change at line 1390
} }
void LIns::initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns * oprnd3) { void LIns::initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns * oprnd3) {
clearReg(); clearReg();
clearArIndex(); clearArIndex();
lastWord.opcode = opcode; lastWord.opcode = opcode;
toLInsOp3()->oprnd_1 = oprnd1; toLInsOp3()->oprnd_1 = oprnd1;
toLInsOp3()->oprnd_2 = oprnd2; toLInsOp3()->oprnd_2 = oprnd2;
toLInsOp3()->oprnd_3 = oprnd3; toLInsOp3()->oprnd_3 = oprnd3;
NanoAssert(isLInsOp3()); NanoAssert(isLInsOp3());
} }
void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d) { void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet) {
clearReg(); clearReg();
clearArIndex(); clearArIndex();
lastWord.opcode = opcode; lastWord.opcode = opcode;
toLInsLd()->oprnd_1 = val; toLInsLd()->oprnd_1 = val;
toLInsLd()->disp = d; NanoAssert(d == int16_t(d));
toLInsLd()->disp = int16_t(d);
toLInsLd()->accSet = accSet;
NanoAssert(isLInsLd()); NanoAssert(isLInsLd());
} }
void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) { void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet) {
clearReg(); clearReg();
clearArIndex(); clearArIndex();
lastWord.opcode = opcode; lastWord.opcode = opcode;
toLInsSti()->oprnd_1 = val; toLInsSti()->oprnd_1 = val;
toLInsSti()->oprnd_2 = base; toLInsSti()->oprnd_2 = base;
toLInsSti()->disp = d; NanoAssert(d == int16_t(d));
toLInsSti()->disp = int16_t(d);
toLInsSti()->accSet = accSet;
NanoAssert(isLInsSti()); NanoAssert(isLInsSti());
} }
void LIns::initLInsSk(LIns* prevLIns) { void LIns::initLInsSk(LIns* prevLIns) {
clearReg(); clearReg();
clearArIndex(); clearArIndex();
lastWord.opcode = LIR_skip; lastWord.opcode = LIR_skip;
toLInsSk()->prevLIns = prevLIns; toLInsSk()->prevLIns = prevLIns;
NanoAssert(isLInsSk()); NanoAssert(isLInsSk());
} }
void LIns::initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci) { void LIns::initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci) {
skipping to change at line 1079 skipping to change at line 1527
int32_t LIns::disp() const { int32_t LIns::disp() const {
if (isLInsSti()) { if (isLInsSti()) {
return toLInsSti()->disp; return toLInsSti()->disp;
} else { } else {
NanoAssert(isLInsLd()); NanoAssert(isLInsLd());
return toLInsLd()->disp; return toLInsLd()->disp;
} }
} }
AccSet LIns::accSet() const {
if (isLInsSti()) {
return toLInsSti()->accSet;
} else {
NanoAssert(isLInsLd());
return toLInsLd()->accSet;
}
}
LIns* LIns::prevLIns() const { LIns* LIns::prevLIns() const {
NanoAssert(isLInsSk()); NanoAssert(isLInsSk());
return toLInsSk()->prevLIns; return toLInsSk()->prevLIns;
} }
inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_param)); r eturn toLInsP()->arg; } inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_param)); r eturn toLInsP()->arg; }
inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_param)); r eturn toLInsP()->kind; } inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_param)); r eturn toLInsP()->kind; }
inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; } inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
inline int32_t LIns::imm64_0() const { NanoAssert(isconstq()); return toLInsN64()->imm64_0; } inline int32_t LIns::imm64_0() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_0; }
inline int32_t LIns::imm64_1() const { NanoAssert(isconstq()); return toLInsN64()->imm64_1; } inline int32_t LIns::imm64_1() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_1; }
uint64_t LIns::imm64() const { uint64_t LIns::imm64() const {
NanoAssert(isconstq()); NanoAssert(isconstqf());
return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64( )->imm64_0); return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64( )->imm64_0);
} }
double LIns::imm64f() const { double LIns::imm64f() const {
NanoAssert(isconstf());
union { union {
double f; double f;
uint64_t q; uint64_t q;
} u; } u;
u.q = imm64(); u.q = imm64();
return u.f; return u.f;
} }
int32_t LIns::size() const { int32_t LIns::size() const {
NanoAssert(isop(LIR_alloc)); NanoAssert(isop(LIR_alloc));
skipping to change at line 1191 skipping to change at line 1649
return out->insImm(imm); return out->insImm(imm);
} }
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
virtual LInsp insImmq(uint64_t imm) { virtual LInsp insImmq(uint64_t imm) {
return out->insImmq(imm); return out->insImmq(imm);
} }
#endif #endif
virtual LInsp insImmf(double d) { virtual LInsp insImmf(double d) {
return out->insImmf(d); return out->insImmf(d);
} }
virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d) { virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
    return out->insLoad(op, base, d); return out->insLoad(op, base, d, accSet);
} }
virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d) { virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) {
    return out->insStore(op, value, base, d); return out->insStore(op, value, base, d, accSet);
} }
// args[] is in reverse order, ie. args[0] holds the rightmost arg. // args[] is in reverse order, ie. args[0] holds the rightmost arg.
virtual LInsp insCall(const CallInfo *call, LInsp args[]) { virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
return out->insCall(call, args); return out->insCall(call, args);
} }
virtual LInsp insAlloc(int32_t size) { virtual LInsp insAlloc(int32_t size) {
NanoAssert(size != 0); NanoAssert(size != 0);
return out->insAlloc(size); return out->insAlloc(size);
} }
virtual LInsp insJtbl(LIns* index, uint32_t size) { virtual LInsp insJtbl(LIns* index, uint32_t size) {
return out->insJtbl(index, size); return out->insJtbl(index, size);
} }
// convenience functions // convenience functions
// Inserts a conditional to execute and branches to execute if // Inserts a conditional to execute and branches to execute if
// the condition is true and false respectively. // the condition is true and false respectively.
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bo LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_
ol use_cmov); cmov);
// Inserts an integer comparison to 0 // Inserts an integer comparison to 0
LIns* ins_eq0(LIns* oprnd1); LIns* ins_eq0(LIns* oprnd1) {
return ins2i(LIR_eq, oprnd1, 0);
}
// Inserts a pointer comparison to 0 // Inserts a pointer comparison to 0
LIns* ins_peq0(LIns* oprnd1); LIns* ins_peq0(LIns* oprnd1) {
return ins2(LIR_peq, oprnd1, insImmWord(0));
}
// Inserts a binary operation where the second operand is an // Inserts a binary operation where the second operand is an
// integer immediate. // integer immediate.
LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t); LIns* ins2i(LOpcode v, LIns* oprnd1, int32_t imm) {
return ins2(v, oprnd1, insImm(imm));
}
#if NJ_SOFTFLOAT_SUPPORTED #if NJ_SOFTFLOAT_SUPPORTED
LIns* qjoin(LInsp lo, LInsp hi); LIns* qjoin(LInsp lo, LInsp hi) {
return ins2(LIR_qjoin, lo, hi);
}
#endif #endif
LIns* insImmPtr(const void *ptr); LIns* insImmPtr(const void *ptr) {
LIns* insImmWord(intptr_t ptr); #ifdef NANOJIT_64BIT
// Sign or zero extend integers to native integers. On 32-bit this return insImmq((uint64_t)ptr);
is a no-op. #else
LIns* ins_i2p(LIns* intIns); return insImm((int32_t)ptr);
LIns* ins_u2p(LIns* uintIns); #endif
// choose LIR_sti or LIR_stqi based on size of value }
LIns* insStorei(LIns* value, LIns* base, int32_t d);
LIns* insImmWord(intptr_t value) {
#ifdef NANOJIT_64BIT
return insImmq(value);
#else
return insImm(value);
#endif
}
// Sign-extend integers to native integers. On 32-bit this is a no-
op.
LIns* ins_i2p(LIns* intIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_i2q, intIns);
#else
return intIns;
#endif
}
// Zero-extend integers to native integers. On 32-bit this is a no-
op.
LIns* ins_u2p(LIns* uintIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_u2q, uintIns);
#else
return uintIns;
#endif
}
// Chooses LIR_sti or LIR_stqi based on size of value.
LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
}; };
#ifdef NJ_VERBOSE #ifdef NJ_VERBOSE
extern const char* lirNames[]; extern const char* lirNames[];
/** // Maps address ranges to meaningful names.
 * map address ranges to meaningful names.
 */
class LabelMap class AddrNameMap
{ {
    Allocator& allocator; Allocator& allocator;
    class Entry class Entry
    { {
    public: public:
        Entry(int) : name(0), size(0), align(0) {} Entry(int) : name(0), size(0), align(0) {}
        Entry(char *n, size_t s, size_t a) : name(n), size(s), align(a) {} Entry(char *n, size_t s, size_t a) : name(n), size(s), align(a) {}
        char* name; char* name;
        size_t size:29, align:3; size_t size:29, align:3;
    }; };
    TreeMap<const void*, Entry*> names; TreeMap<const void*, Entry*> names; // maps code regions to names
    LogControl *logc;
    char buf[5000], *end;
    void formatAddr(const void *p, char *buf);
public: public:
    LabelMap(Allocator& allocator, LogControl* logc); AddrNameMap(Allocator& allocator);
    void add(const void *p, size_t size, size_t align, const char *name); void addAddrRange(const void *p, size_t size, size_t align, const char *name);
    const char *dup(const char *); void lookupAddr(void *p, char*& name, int32_t& offset);
    const char *format(const void *p);
}; };
// Maps LIR instructions to meaningful names.
class LirNameMap class LirNameMap
{ {
private:
Allocator& alloc; Allocator& alloc;
template <class Key> template <class Key>
class CountMap: public HashMap<Key, int> { class CountMap: public HashMap<Key, int> {
public: public:
CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {} CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {}
int add(Key k) { int add(Key k) {
int c = 1; int c = 1;
if (containsKey(k)) { if (containsKey(k)) {
c = 1+get(k); c = 1+get(k);
skipping to change at line 1278 skipping to change at line 1773
CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {} CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {}
int add(Key k) { int add(Key k) {
int c = 1; int c = 1;
if (containsKey(k)) { if (containsKey(k)) {
c = 1+get(k); c = 1+get(k);
} }
put(k,c); put(k,c);
return c; return c;
} }
}; };
CountMap<int> lircounts; CountMap<int> lircounts;
CountMap<const CallInfo *> funccounts; CountMap<const CallInfo *> funccounts;
CountMap<const char *> namecounts;
void addNameWithSuffix(LInsp i, const char *s, int suffix, bool ignoreOneSuffix);
class Entry class Entry
{ {
public: public:
Entry(int) : name(0) {} Entry(int) : name(0) {}
Entry(char* n) : name(n) {} Entry(char* n) : name(n) {}
char* name; char* name;
}; };
HashMap<LInsp, Entry*> names; HashMap<LInsp, Entry*> names;
void formatImm(int32_t c, char *buf);
void formatImmq(uint64_t c, char *buf);
public: public:
    LabelMap *labels; LirNameMap(Allocator& alloc)
    LirNameMap(Allocator& alloc, LabelMap *lm)
        : alloc(alloc), : alloc(alloc),
        lircounts(alloc), lircounts(alloc),
        funccounts(alloc), funccounts(alloc),
        names(alloc), namecounts(alloc),
        labels(lm) names(alloc)
    {} {}
    void addName(LInsp i, const char *s); void addName(LInsp ins, const char *s); // gives 'ins' a special name
    void copyName(LInsp i, const char *s, int suffix); const char* createName(LInsp ins); // gives 'ins' a generic name
    const char *formatRef(LIns *ref); const char* lookupName(LInsp ins);
    const char *formatIns(LInsp i); };
    void formatGuard(LInsp i, char *buf);
    void formatGuardXov(LInsp i, char *buf);
// We use big buffers for cases where we need to fit a whole instruction,
// and smaller buffers for all the others.  These should easily be long
// enough, but for safety the formatXyz() functions check and won't exceed
// those limits.
class InsBuf {
public:
static const size_t len = 1000;
char buf[len];
};
class RefBuf {
public:
static const size_t len = 200;
char buf[len];
};
class LInsPrinter
{
private:
Allocator& alloc;
void formatImm(RefBuf* buf, int32_t c);
void formatImmq(RefBuf* buf, uint64_t c);
void formatGuard(InsBuf* buf, LInsp ins);
void formatGuardXov(InsBuf* buf, LInsp ins);
public:
LInsPrinter(Allocator& alloc)
: alloc(alloc)
{
addrNameMap = new (alloc) AddrNameMap(alloc);
lirNameMap = new (alloc) LirNameMap(alloc);
}
char *formatAddr(RefBuf* buf, void* p);
char *formatRef(RefBuf* buf, LInsp ref);
char *formatIns(InsBuf* buf, LInsp ins);
char *formatAccSet(RefBuf* buf, AccSet accSet);
AddrNameMap* addrNameMap;
LirNameMap* lirNameMap;
}; };
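Hypothetical use of LInsPrinter for debug output, assuming 'printer', 'logc', and an instruction 'ins' are already in hand (as they are in VerboseWriter below):

    InsBuf b;
    RefBuf r;
    logc->printf("%s (operand: %s)\n",
                 printer->formatIns(&b, ins),
                 printer->formatRef(&r, ins->oprnd1()));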
class VerboseWriter : public LirWriter class VerboseWriter : public LirWriter
{ {
InsList code; InsList code;
LirNameMap* names; LInsPrinter* printer;
LogControl* logc; LogControl* logc;
const char* const prefix; const char* const prefix;
bool const always_flush; bool const always_flush;
public: public:
VerboseWriter(Allocator& alloc, LirWriter *out, VerboseWriter(Allocator& alloc, LirWriter *out, LInsPrinter* printe
LirNameMap* names, LogControl* logc, const char* pref r, LogControl* logc,
ix = "", bool always_flush = false) const char* prefix = "", bool always_flush = false)
: LirWriter(out), code(alloc), names(names), logc(logc), prefix : LirWriter(out), code(alloc), printer(printer), logc(logc), pr
(prefix), always_flush(always_flush) efix(prefix), always_flush(always_flush)
{} {}
LInsp add(LInsp i) { LInsp add(LInsp i) {
if (i) { if (i) {
code.add(i); code.add(i);
if (always_flush) if (always_flush)
flush(); flush();
} }
return i; return i;
} }
LInsp add_flush(LInsp i) { LInsp add_flush(LInsp i) {
if ((i = add(i)) != 0) if ((i = add(i)) != 0)
flush(); flush();
return i; return i;
} }
void flush() void flush()
{ {
if (!code.isEmpty()) { if (!code.isEmpty()) {
InsBuf b;
int32_t count = 0; int32_t count = 0;
for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) { for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) {
logc->printf("%s %s\n",prefix,names->formatIns(p->head)); logc->printf("%s %s\n", prefix, printer->formatIns(&b, p->head));
count++; count++;
} }
code.clear(); code.clear();
if (count > 1) if (count > 1)
logc->printf("\n"); logc->printf("\n");
} }
} }
LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) { LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) {
return add_flush(out->insGuard(op,cond,gr)); return add_flush(out->insGuard(op,cond,gr));
skipping to change at line 1390 skipping to change at line 1927
} }
LIns* ins3(LOpcode v, LInsp a, LInsp b, LInsp c) { LIns* ins3(LOpcode v, LInsp a, LInsp b, LInsp c) {
return add(out->ins3(v, a, b, c)); return add(out->ins3(v, a, b, c));
} }
LIns* insCall(const CallInfo *call, LInsp args[]) { LIns* insCall(const CallInfo *call, LInsp args[]) {
return add_flush(out->insCall(call, args)); return add_flush(out->insCall(call, args));
} }
LIns* insParam(int32_t i, int32_t kind) { LIns* insParam(int32_t i, int32_t kind) {
return add(out->insParam(i, kind)); return add(out->insParam(i, kind));
} }
LIns* insLoad(LOpcode v, LInsp base, int32_t disp) { LIns* insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) {
return add(out->insLoad(v, base, disp)); return add(out->insLoad(v, base, disp, accSet));
} }
LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d) { LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet) {
return add(out->insStore(op, v, b, d, accSet));
} }
LIns* insAlloc(int32_t size) { LIns* insAlloc(int32_t size) {
return add(out->insAlloc(size)); return add(out->insAlloc(size));
} }
LIns* insImm(int32_t imm) { LIns* insImm(int32_t imm) {
return add(out->insImm(imm)); return add(out->insImm(imm));
} }
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t imm) { LIns* insImmq(uint64_t imm) {
return add(out->insImmq(imm)); return add(out->insImmq(imm));
skipping to change at line 1424 skipping to change at line 1961
class ExprFilter: public LirWriter class ExprFilter: public LirWriter
{ {
public: public:
ExprFilter(LirWriter *out) : LirWriter(out) {} ExprFilter(LirWriter *out) : LirWriter(out) {}
LIns* ins1(LOpcode v, LIns* a); LIns* ins1(LOpcode v, LIns* a);
LIns* ins2(LOpcode v, LIns* a, LIns* b); LIns* ins2(LOpcode v, LIns* a, LIns* b);
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c); LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
LIns* insGuard(LOpcode, LIns *cond, GuardRecord *); LIns* insGuard(LOpcode, LIns *cond, GuardRecord *);
LIns* insGuardXov(LOpcode, LIns* a, LIns* b, GuardRecord *); LIns* insGuardXov(LOpcode, LIns* a, LIns* b, GuardRecord *);
LIns* insBranch(LOpcode, LIns *cond, LIns *target); LIns* insBranch(LOpcode, LIns *cond, LIns *target);
LIns* insLoad(LOpcode op, LInsp base, int32_t off); LIns* insLoad(LOpcode op, LInsp base, int32_t off, AccSet accSet);
}; };
enum LInsHashKind { enum LInsHashKind {
// We divide instruction kinds into groups for the use of LInsHashS et. // We divide instruction kinds into groups for the use of LInsHashS et.
// LIns0 isn't present because we don't need to record any 0-ary // LIns0 isn't present because we don't need to record any 0-ary
// instructions. // instructions.
LInsImm = 0, LInsImm = 0,
LInsImmq = 1, // only occurs on 64-bit platforms LInsImmq = 1, // only occurs on 64-bit platforms
LInsImmf = 2, LInsImmf = 2,
LIns1 = 3, LIns1 = 3,
LIns2 = 4, LIns2 = 4,
LIns3 = 5, LIns3 = 5,
        LInsLoad = 6, LInsCall = 6,
        LInsCall = 7,
        // Loads are special.  We group them by access region:  one table for
        // each region, and then a catch-all table for any loads marked with
        // multiple regions.  This arrangement makes the removal of
        // invalidated loads fast -- eg. we can invalidate all STACK loads by
        // just clearing the LInsLoadStack table.  The disadvantage is that
        // loads marked with multiple regions must be invalidated
        // conservatively, eg. if any intervening stores occur.  But loads
        // marked with multiple regions should be rare.  (A sketch of the
        // invalidation scheme follows this enum.)
        LInsLoadReadOnly = 7,
        LInsLoadStack = 8,
        LInsLoadRStack = 9,
        LInsLoadOther = 10,
        LInsLoadMultiple = 11,
LInsFirst = 0, LInsFirst = 0,
LInsLast = 7, LInsLast = 11,
// need a value after "last" to outsmart compilers that will insist last+1 is impossible // need a value after "last" to outsmart compilers that will insist last+1 is impossible
LInsInvalid = 8 LInsInvalid = 12
}; };
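A minimal sketch of the invalidation scheme described in the comment above. It is illustrative only: the ACC_* region names and the helper function are assumed here, and the real logic lives in the CseFilter implementation, not in this header.

    void invalidateLoadsAfterStore(LInsHashSet* exprs, AccSet stored)
    {
        if (stored & ACC_STACK)
            exprs->clear(LInsLoadStack);    // every cached STACK load is now stale
        if (stored & ACC_RSTACK)
            exprs->clear(LInsLoadRStack);
        if (stored & ACC_OTHER)
            exprs->clear(LInsLoadOther);
        exprs->clear(LInsLoadMultiple);     // multi-region loads: invalidate conservatively
        // LInsLoadReadOnly is never cleared: read-only regions cannot be stored to.
    }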
#define nextKind(kind) LInsHashKind(kind+1) #define nextKind(kind) LInsHashKind(kind+1)
// @todo, this could be replaced by a generic HashMap or HashSet, if we had one
class LInsHashSet class LInsHashSet
{ {
// Must be a power of 2. // Must be a power of 2.
// Don't start too small, or we'll waste time growing and rehashing . // Don't start too small, or we'll waste time growing and rehashing .
        // Don't start too large, or we'll waste memory. // Don't start too large, or we'll waste memory.
static const uint32_t kInitialCap[LInsLast + 1]; static const uint32_t kInitialCap[LInsLast + 1];
// There is one list for each instruction kind. This lets us size the // There is one list for each instruction kind. This lets us size the
// lists appropriately (some instructions are more common than othe rs). // lists appropriately (some instructions are more common than othe rs).
// It also lets us have kind-specific find/add/grow functions, whic h // It also lets us have kind-specific find/add/grow functions, whic h
skipping to change at line 1464 skipping to change at line 2013
// There is one list for each instruction kind. This lets us size the // There is one list for each instruction kind. This lets us size the
// lists appropriately (some instructions are more common than othe rs). // lists appropriately (some instructions are more common than othe rs).
// It also lets us have kind-specific find/add/grow functions, whic h // It also lets us have kind-specific find/add/grow functions, whic h
// are faster than generic versions. // are faster than generic versions.
LInsp *m_list[LInsLast + 1]; LInsp *m_list[LInsLast + 1];
uint32_t m_cap[LInsLast + 1]; uint32_t m_cap[LInsLast + 1];
uint32_t m_used[LInsLast + 1]; uint32_t m_used[LInsLast + 1];
typedef uint32_t (LInsHashSet::*find_t)(LInsp); typedef uint32_t (LInsHashSet::*find_t)(LInsp);
find_t m_find[LInsLast + 1]; find_t m_find[LInsLast + 1];
Allocator& alloc; Allocator& alloc;
static uint32_t hashImm(int32_t); static uint32_t hashImm(int32_t);
        static uint32_t hashImmq(uint64_t);     // not NANOJIT_64BIT only used by findImmf() static uint32_t hashImmq(uint64_t);     // not NANOJIT_64BIT-only -- used by findImmf()
static uint32_t hash1(LOpcode v, LInsp); static uint32_t hash1(LOpcode op, LInsp);
static uint32_t hash2(LOpcode v, LInsp, LInsp); static uint32_t hash2(LOpcode op, LInsp, LInsp);
static uint32_t hash3(LOpcode v, LInsp, LInsp, LInsp); static uint32_t hash3(LOpcode op, LInsp, LInsp, LInsp);
static uint32_t hashLoad(LOpcode v, LInsp, int32_t); static uint32_t hashLoad(LOpcode op, LInsp, int32_t, AccSet);
static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]); static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]);
// These private versions are used after an LIns has been created; // These private versions are used after an LIns has been created;
// they are used for rehashing after growing. // they are used for rehashing after growing.
uint32_t findImm(LInsp ins); uint32_t findImm(LInsp ins);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
uint32_t findImmq(LInsp ins); uint32_t findImmq(LInsp ins);
#endif #endif
uint32_t findImmf(LInsp ins); uint32_t findImmf(LInsp ins);
uint32_t find1(LInsp ins); uint32_t find1(LInsp ins);
uint32_t find2(LInsp ins); uint32_t find2(LInsp ins);
uint32_t find3(LInsp ins); uint32_t find3(LInsp ins);
uint32_t findLoad(LInsp ins);
uint32_t findCall(LInsp ins); uint32_t findCall(LInsp ins);
uint32_t findLoadReadOnly(LInsp ins);
uint32_t findLoadStack(LInsp ins);
uint32_t findLoadRStack(LInsp ins);
uint32_t findLoadOther(LInsp ins);
uint32_t findLoadMultiple(LInsp ins);
void grow(LInsHashKind kind); void grow(LInsHashKind kind);
public: public:
// kInitialCaps[i] holds the initial size for m_list[i]. // kInitialCaps[i] holds the initial size for m_list[i].
LInsHashSet(Allocator&, uint32_t kInitialCaps[]); LInsHashSet(Allocator&, uint32_t kInitialCaps[]);
// These public versions are used before an LIns has been created. // These public versions are used before an LIns has been created.
LInsp findImm(int32_t a, uint32_t &k); LInsp findImm(int32_t a, uint32_t &k);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LInsp findImmq(uint64_t a, uint32_t &k); LInsp findImmq(uint64_t a, uint32_t &k);
#endif #endif
LInsp findImmf(uint64_t d, uint32_t &k); LInsp findImmf(uint64_t d, uint32_t &k);
LInsp find1(LOpcode v, LInsp a, uint32_t &k); LInsp find1(LOpcode v, LInsp a, uint32_t &k);
LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k); LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k); LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
        LInsp findLoad(LOpcode v, LInsp a, int32_t b, uint32_t &k); LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind, uint32_t &k);
LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], u int32_t &k); LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], u int32_t &k);
// 'k' is the index found by findXYZ(). // 'k' is the index found by findXYZ().
LInsp add(LInsHashKind kind, LInsp ins, uint32_t k); void add(LInsHashKind kind, LInsp ins, uint32_t k);
void clear(); void clear(); // clears all tables
void clear(LInsHashKind); // clears one table
}; };
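For illustration, the intended two-step use of the public find/add interface. The surrounding CSE logic and the names 'exprs' and 'out' are assumed here and are not part of this header:

    uint32_t k;
    LInsp found = exprs->find2(op, a, b, k);   // hash probe; fills in the slot index 'k'
    if (found)
        return found;                          // an equivalent instruction already exists
    LInsp ins = out->ins2(op, a, b);           // otherwise create it once...
    exprs->add(LIns2, ins, k);                 // ...and record it at the slot found above
    return ins;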
class CseFilter: public LirWriter class CseFilter: public LirWriter
{ {
private: private:
LInsHashSet* exprs; LInsHashSet* exprs;
AccSet storesSinceLastLoad; // regions stored to since the last load
public: public:
CseFilter(LirWriter *out, Allocator&); CseFilter(LirWriter *out, Allocator&);
LIns* insImm(int32_t imm); LIns* insImm(int32_t imm);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t q); LIns* insImmq(uint64_t q);
#endif #endif
LIns* insImmf(double d); LIns* insImmf(double d);
LIns* ins0(LOpcode v); LIns* ins0(LOpcode v);
LIns* ins1(LOpcode v, LInsp); LIns* ins1(LOpcode v, LInsp);
LIns* ins2(LOpcode v, LInsp, LInsp); LIns* ins2(LOpcode v, LInsp, LInsp);
LIns* ins3(LOpcode v, LInsp, LInsp, LInsp); LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
LIns* insLoad(LOpcode op, LInsp cond, int32_t d); LIns* insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet);
        LIns* insStore(LOpcode op, LInsp value, LInsp base, int32_t d, AccSet accSet);
LIns* insCall(const CallInfo *call, LInsp args[]); LIns* insCall(const CallInfo *call, LInsp args[]);
LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr); LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr); LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
}; };
class LirBuffer class LirBuffer
{ {
public: public:
LirBuffer(Allocator& alloc); LirBuffer(Allocator& alloc);
void clear(); void clear();
uintptr_t makeRoom(size_t szB); // make room for an instruc tion uintptr_t makeRoom(size_t szB); // make room for an instruc tion
debug_only (void validate() const;) debug_only (void validate() const;)
verbose_only(LirNameMap* names;) verbose_only(LInsPrinter* printer;)
int32_t insCount(); int32_t insCount();
size_t byteCount(); size_t byteCount();
// stats // stats
struct struct
{ {
uint32_t lir; // # instructions uint32_t lir; // # instructions
} }
_stats; _stats;
skipping to change at line 1586 skipping to change at line 2144
{ {
LirBuffer* _buf; // underlying buffer housing t he instructions LirBuffer* _buf; // underlying buffer housing t he instructions
const Config& _config; const Config& _config;
public: public:
LirBufWriter(LirBuffer* buf, const Config& config) LirBufWriter(LirBuffer* buf, const Config& config)
: LirWriter(0), _buf(buf), _config(config) { : LirWriter(0), _buf(buf), _config(config) {
} }
// LirWriter interface // LirWriter interface
            LInsp insLoad(LOpcode op, LInsp base, int32_t disp); LInsp insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet);
            LInsp insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp); LInsp insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp, AccSet accSet);
LInsp ins0(LOpcode op); LInsp ins0(LOpcode op);
LInsp ins1(LOpcode op, LInsp o1); LInsp ins1(LOpcode op, LInsp o1);
LInsp ins2(LOpcode op, LInsp o1, LInsp o2); LInsp ins2(LOpcode op, LInsp o1, LInsp o2);
LInsp ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3); LInsp ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3);
LInsp insParam(int32_t i, int32_t kind); LInsp insParam(int32_t i, int32_t kind);
LInsp insImm(int32_t imm); LInsp insImm(int32_t imm);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LInsp insImmq(uint64_t imm); LInsp insImmq(uint64_t imm);
#endif #endif
LInsp insImmf(double d); LInsp insImmf(double d);
skipping to change at line 1613 skipping to change at line 2171
LInsp insJtbl(LIns* index, uint32_t size); LInsp insJtbl(LIns* index, uint32_t size);
}; };
class LirFilter class LirFilter
{ {
public: public:
LirFilter *in; LirFilter *in;
LirFilter(LirFilter *in) : in(in) {} LirFilter(LirFilter *in) : in(in) {}
virtual ~LirFilter(){} virtual ~LirFilter(){}
        // It's crucial that once this reaches the LIR_start at the beginning
        // of the buffer, that it just keeps returning that LIR_start LIns on
        // any subsequent calls.
virtual LInsp read() { virtual LInsp read() {
return in->read(); return in->read();
} }
virtual LInsp pos() { virtual LInsp finalIns() {
return in->pos(); return in->finalIns();
} }
}; };
// concrete // concrete
class LirReader : public LirFilter class LirReader : public LirFilter
{ {
        LInsp _i; // next instruction to be read; invariant: is never a skip LInsp _ins; // next instruction to be read; invariant: is never a skip
        LInsp _finalIns; // final instruction in the stream; ie. the first one to be read
public: public:
LirReader(LInsp i) : LirFilter(0), _i(i) LirReader(LInsp ins) : LirFilter(0), _ins(ins), _finalIns(ins)
{ {
// The last instruction for a fragment shouldn't be a skip. // The last instruction for a fragment shouldn't be a skip.
// (Actually, if the last *inserted* instruction exactly fills up // (Actually, if the last *inserted* instruction exactly fills up
// a chunk, a new chunk will be created, and thus the last *wri tten* // a chunk, a new chunk will be created, and thus the last *wri tten*
// instruction will be a skip -- the one needed for the // instruction will be a skip -- the one needed for the
// cross-chunk link. But the last *inserted* instruction is wh at // cross-chunk link. But the last *inserted* instruction is wh at
// is recorded and used to initialise each LirReader, and that is // is recorded and used to initialise each LirReader, and that is
// what is seen here, and therefore this assertion holds.) // what is seen here, and therefore this assertion holds.)
NanoAssert(i && !i->isop(LIR_skip)); NanoAssert(ins && !ins->isop(LIR_skip));
} }
virtual ~LirReader() {} virtual ~LirReader() {}
// Returns next instruction and advances to the prior instruction. // Returns next instruction and advances to the prior instruction.
// Invariant: never returns a skip. // Invariant: never returns a skip.
LInsp read(); LInsp read();
        // Returns next instruction. Invariant: never returns a skip.
        LInsp pos() { LInsp finalIns() {
            return _i; return _finalIns;
        } }
}; };
verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);) verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);)
// WARNING: StackFilter assumes that all stack entries are eight bytes.
// Some of its optimisations aren't valid if that isn't true. See
// StackFilter::read() for more details.
class StackFilter: public LirFilter class StackFilter: public LirFilter
{ {
LInsp sp; LInsp sp;
LInsp rp; BitSet stk;
BitSet spStk; int top;
BitSet rpStk; int getTop(LInsp br);
int spTop;
int rpTop;
void getTops(LInsp br, int& spTop, int& rpTop);
public: public:
StackFilter(LirFilter *in, Allocator& alloc, LInsp sp, LInsp rp); StackFilter(LirFilter *in, Allocator& alloc, LInsp sp);
bool ignoreStore(LInsp ins, int top, BitSet* stk);
LInsp read(); LInsp read();
}; };
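An illustrative read-side pipeline; 'frag', 'alloc' and the 'lastIns'/'lirbuf->sp' fields are assumed here. It also shows the LIR_start termination invariant noted on LirFilter::read() above; the StackFilter drops stores to stack slots that are provably dead, which is only safe under the eight-byte-slot assumption in the warning above.

    LirReader reader(frag->lastIns);                       // iterate backwards from the end
    StackFilter stackfilter(&reader, alloc, frag->lirbuf->sp);
    for (LIns* ins = stackfilter.read(); !ins->isop(LIR_start); ins = stackfilter.read()) {
        // consume the filtered stream, eg. hand each instruction to the assembler
    }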
// eliminate redundant loads by watching for stores & mutator calls
class LoadFilter: public LirWriter
{
public:
LInsp sp, rp;
LInsHashSet* exprs;
void clear(LInsp p);
public:
LoadFilter(LirWriter *out, Allocator& alloc)
: LirWriter(out), sp(NULL), rp(NULL)
{
uint32_t kInitialCaps[LInsLast + 1];
kInitialCaps[LInsImm] = 1;
kInitialCaps[LInsImmq] = 1;
kInitialCaps[LInsImmf] = 1;
kInitialCaps[LIns1] = 1;
kInitialCaps[LIns2] = 1;
kInitialCaps[LIns3] = 1;
kInitialCaps[LInsLoad] = 64;
kInitialCaps[LInsCall] = 1;
exprs = new (alloc) LInsHashSet(alloc, kInitialCaps);
}
LInsp ins0(LOpcode);
LInsp insLoad(LOpcode, LInsp base, int32_t disp);
LInsp insStore(LOpcode op, LInsp v, LInsp b, int32_t d);
LInsp insCall(const CallInfo *call, LInsp args[]);
};
struct SoftFloatOps struct SoftFloatOps
{ {
const CallInfo* opmap[LIR_sentinel]; const CallInfo* opmap[LIR_sentinel];
SoftFloatOps(); SoftFloatOps();
}; };
extern const SoftFloatOps softFloatOps; extern const SoftFloatOps softFloatOps;
// Replaces fpu ops with function calls, for platforms lacking float // Replaces fpu ops with function calls, for platforms lacking float
// hardware (eg. some ARM machines). // hardware (eg. some ARM machines).
skipping to change at line 1741 skipping to change at line 2270
// through the entire writer pipeline and been optimised. By checking // through the entire writer pipeline and been optimised. By checking
// implicit LIR instructions we can check the LIR code at the start of the // implicit LIR instructions we can check the LIR code at the start of the
    // writer pipeline, exactly as it is generated by the compiler front-end. // writer pipeline, exactly as it is generated by the compiler front-end.
// //
// A general note about the errors produced by this class: for // A general note about the errors produced by this class: for
// TraceMonkey, they won't include special names for instructions that // TraceMonkey, they won't include special names for instructions that
// have them unless TMFLAGS is specified. // have them unless TMFLAGS is specified.
class ValidateWriter : public LirWriter class ValidateWriter : public LirWriter
{ {
private: private:
const char* _whereInPipeline; LInsPrinter* printer;
const char* whereInPipeline;
const char* type2string(LTy type); const char* type2string(LTy type);
void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args []); void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args []);
void errorStructureShouldBe(LOpcode op, const char* argDesc, int ar gN, LIns* arg, void errorStructureShouldBe(LOpcode op, const char* argDesc, int ar gN, LIns* arg,
const char* shouldBeDesc); const char* shouldBeDesc);
void errorAccSet(const char* what, AccSet accSet, const char* shoul dDesc);
void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op 2); void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op 2);
void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins); void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins);
void checkLInsIsNull(LOpcode op, int argN, LIns* ins); void checkLInsIsNull(LOpcode op, int argN, LIns* ins);
        void checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet);
LInsp sp, rp;
public: public:
        ValidateWriter(LirWriter* out, const char* stageName); ValidateWriter(LirWriter* out, LInsPrinter* printer, const char* where);
        LIns* insLoad(LOpcode op, LIns* base, int32_t d); void setSp(LInsp ins) { sp = ins; }
        LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d); void setRp(LInsp ins) { rp = ins; }
        LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
        LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
LIns* ins0(LOpcode v); LIns* ins0(LOpcode v);
LIns* ins1(LOpcode v, LIns* a); LIns* ins1(LOpcode v, LIns* a);
LIns* ins2(LOpcode v, LIns* a, LIns* b); LIns* ins2(LOpcode v, LIns* a, LIns* b);
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c); LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
LIns* insParam(int32_t arg, int32_t kind); LIns* insParam(int32_t arg, int32_t kind);
LIns* insImm(int32_t imm); LIns* insImm(int32_t imm);
#ifdef NANOJIT_64BIT #ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t imm); LIns* insImmq(uint64_t imm);
#endif #endif
LIns* insImmf(double d); LIns* insImmf(double d);
skipping to change at line 1791 skipping to change at line 2328
#ifdef NJ_VERBOSE #ifdef NJ_VERBOSE
/* A listing filter for LIR, going through backwards. It merely /* A listing filter for LIR, going through backwards. It merely
passes its input to its output, but notes it down too. When passes its input to its output, but notes it down too. When
finish() is called, prints out what went through. Is intended to be finish() is called, prints out what went through. Is intended to be
used to print arbitrary intermediate transformation stages of used to print arbitrary intermediate transformation stages of
LIR. */ LIR. */
class ReverseLister : public LirFilter class ReverseLister : public LirFilter
{ {
Allocator& _alloc; Allocator& _alloc;
LirNameMap* _names; LInsPrinter* _printer;
const char* _title; const char* _title;
StringList _strs; StringList _strs;
LogControl* _logc; LogControl* _logc;
LIns* _prevIns;
public: public:
ReverseLister(LirFilter* in, Allocator& alloc, ReverseLister(LirFilter* in, Allocator& alloc,
LirNameMap* names, LogControl* logc, const char* titl e) LInsPrinter* printer, LogControl* logc, const char* t itle)
: LirFilter(in) : LirFilter(in)
, _alloc(alloc) , _alloc(alloc)
, _names(names) , _printer(printer)
, _title(title) , _title(title)
, _strs(alloc) , _strs(alloc)
, _logc(logc) , _logc(logc)
, _prevIns(NULL)
{ } { }
void finish(); void finish();
LInsp read(); LInsp read();
}; };
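A hypothetical way to wire it in ('reader', 'alloc', 'printer' and 'logc' are assumed): interpose the lister between two stages, drain it, then dump everything that passed through.

    ReverseLister lister(&reader, alloc, printer, logc, "after StackFilter");
    for (LIns* ins = lister.read(); !ins->isop(LIR_start); ins = lister.read())
        ;                    // downstream consumption happens here
    lister.finish();         // prints the recorded listing under the given title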
#endif #endif
} }
#endif // __nanojit_LIR__ #endif // __nanojit_LIR__
 End of changes. 119 change blocks. 
251 lines changed or deleted 854 lines changed or added


 Nativei386.h   Nativei386.h 
skipping to change at line 183 skipping to change at line 183
#define DECLARE_PLATFORM_STATS() #define DECLARE_PLATFORM_STATS()
#define DECLARE_PLATFORM_REGALLOC() #define DECLARE_PLATFORM_REGALLOC()
#define DECLARE_PLATFORM_ASSEMBLER() \ #define DECLARE_PLATFORM_ASSEMBLER() \
const static Register argRegs[2], retRegs[2]; \ const static Register argRegs[2], retRegs[2]; \
int32_t max_stk_args;\ int32_t max_stk_args;\
void nativePageReset();\ void nativePageReset();\
void nativePageSetup();\ void nativePageSetup();\
void underrunProtect(int);\ void underrunProtect(int);\
void asm_int(Register r, int32_t val, bool canClobberCCs);\ void asm_immi(Register r, int32_t val, bool canClobberCCs);\
void asm_stkarg(LInsp p, int32_t& stkd);\ void asm_stkarg(LInsp p, int32_t& stkd);\
void asm_farg(LInsp, int32_t& stkd);\ void asm_farg(LInsp, int32_t& stkd);\
void asm_arg(ArgSize sz, LInsp p, Register r, int32_t& stkd);\ void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
void asm_pusharg(LInsp);\ void asm_pusharg(LInsp);\
void asm_fcmp(LIns *cond);\ void asm_fcmp(LIns *cond);\
NIns* asm_fbranch(bool, LIns*, NIns*);\ NIns* asm_fbranch(bool, LIns*, NIns*);\
void asm_cmp(LIns *cond); \ void asm_cmp(LIns *cond); \
void asm_div_mod(LIns *cond); \ void asm_div_mod(LIns *cond); \
void asm_load(int d, Register r); \ void asm_load(int d, Register r); \
void asm_quad(Register r, uint64_t q, double d, bool canClobberCCs) ; void asm_immf(Register r, uint64_t q, double d, bool canClobberCCs) ;
#define IMM8(i) \ #define IMM8(i) \
_nIns -= 1; \ _nIns -= 1; \
*((int8_t*)_nIns) = (int8_t)(i) *((int8_t*)_nIns) = (int8_t)(i)
#define IMM16(i) \ #define IMM16(i) \
_nIns -= 2; \ _nIns -= 2; \
*((int16_t*)_nIns) = (int16_t)(i) *((int16_t*)_nIns) = (int16_t)(i)
#define IMM32(i) \ #define IMM32(i) \
skipping to change at line 881 skipping to change at line 881
} while(0) } while(0)
#define SSE_XORPD(r, maskaddr) do {\ #define SSE_XORPD(r, maskaddr) do {\
count_fpuld();\ count_fpuld();\
underrunProtect(8); \ underrunProtect(8); \
IMM32(maskaddr);\ IMM32(maskaddr);\
*(--_nIns) = uint8_t(((r)&7)<<3|5); \ *(--_nIns) = uint8_t(((r)&7)<<3|5); \
*(--_nIns) = 0x57;\ *(--_nIns) = 0x57;\
*(--_nIns) = 0x0f;\ *(--_nIns) = 0x0f;\
*(--_nIns) = 0x66;\ *(--_nIns) = 0x66;\
asm_output("xorpd %s,[0x%p]",gpn(r),(void*)(maskaddr));\ asm_output("xorpd %s,[%p]",gpn(r),(void*)(maskaddr));\
} while(0) } while(0)
#define SSE_XORPDr(rd,rs) do{ \ #define SSE_XORPDr(rd,rs) do{ \
count_fpu();\ count_fpu();\
SSE(0x660f57, (rd)&7, (rs)&7); \ SSE(0x660f57, (rd)&7, (rs)&7); \
asm_output("xorpd %s,%s",gpn(rd),gpn(rs)); \ asm_output("xorpd %s,%s",gpn(rd),gpn(rs)); \
} while(0) } while(0)
// floating point unit // floating point unit
#define FPUc(o) \ #define FPUc(o) \
skipping to change at line 969 skipping to change at line 969
#define FDIVRdm(m) do { const double* const dm = m; \ #define FDIVRdm(m) do { const double* const dm = m; \
count_ldq(); FPUdm(0xdc07, dm); asm_output( "fdivr (%p)",(void*)dm); } while(0) count_ldq(); FPUdm(0xdc07, dm); asm_output( "fdivr (%p)",(void*)dm); } while(0)
#define FINCSTP() do { count_fpu(); FPUc(0xd9f7); asm_output( "fincstp"); } while(0) #define FINCSTP() do { count_fpu(); FPUc(0xd9f7); asm_output( "fincstp"); } while(0)
#define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output( "fstp %s",gpn(r)); fpu_pop();} while(0) #define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output( "fstp %s",gpn(r)); fpu_pop();} while(0)
#define FCOMP() do { count_fpu(); FPUc(0xD8D9); asm_output( "fcomp"); fpu_pop();} while(0) #define FCOMP() do { count_fpu(); FPUc(0xD8D9); asm_output( "fcomp"); fpu_pop();} while(0)
#define FCOMPP() do { count_fpu(); FPUc(0xDED9); asm_output( "fcompp"); fpu_pop();fpu_pop();} while(0) #define FCOMPP() do { count_fpu(); FPUc(0xDED9); asm_output( "fcompp"); fpu_pop();fpu_pop();} while(0)
#define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output( "fld %s",gpn(r)); fpu_push(); } while(0) #define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output( "fld %s",gpn(r)); fpu_push(); } while(0)
#define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output( "emms"); } while (0) #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output( "emms"); } while (0)
// standard direct call // standard direct call
#define CALL(c) do { \ #define CALL(ci) do { \
count_call();\ count_call();\
underrunProtect(5); \ underrunProtect(5); \
int offset = (c->_address) - ((int)_nIns); \ int offset = (ci->_address) - ((int)_nIns); \
IMM32( (uint32_t)offset ); \ IMM32( (uint32_t)offset ); \
*(--_nIns) = 0xE8; \ *(--_nIns) = 0xE8; \
verbose_only(asm_output("call %s",(c->_name));) \ verbose_only(asm_output("call %s",(ci->_name));) \
    debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();) \ debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
} while (0) } while (0)
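The rel32 stored after the 0xE8 opcode is PC-relative: it is the target address minus the address of the instruction that follows the call, which is what _nIns points at when IMM32 runs, since code here is emitted backwards. A worked example with hypothetical addresses:

    // call target (ci->_address)   = 0x08049000
    // first byte after the call    = 0x0804A005
    // stored rel32                 = 0x08049000 - 0x0804A005 = 0xFFFFEFFB   (-4101, a backwards call)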
// indirect call thru register // indirect call thru register
#define CALLr(c,r) do { \ #define CALLr(ci,r) do { \
count_calli();\ count_calli();\
underrunProtect(2);\ underrunProtect(2);\
ALU(0xff, 2, (r));\ ALU(0xff, 2, (r));\
verbose_only(asm_output("call %s",gpn(r));) \ verbose_only(asm_output("call %s",gpn(r));) \
debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();) \ debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
} while (0) } while (0)
} }
#endif // __nanojit_Nativei386__ #endif // __nanojit_Nativei386__
 End of changes. 9 change blocks. 
11 lines changed or deleted 10 lines changed or added


 VMPI.h   VMPI.h 
skipping to change at line 88 skipping to change at line 88
#include <stdint.h> #include <stdint.h>
#include <inttypes.h> #include <inttypes.h>
#endif #endif
#define VMPI_strlen strlen #define VMPI_strlen strlen
#define VMPI_strcat strcat #define VMPI_strcat strcat
#define VMPI_strcmp strcmp #define VMPI_strcmp strcmp
#define VMPI_strncat strncat #define VMPI_strncat strncat
#define VMPI_strcpy strcpy #define VMPI_strcpy strcpy
#define VMPI_sprintf sprintf #define VMPI_sprintf sprintf
#ifdef _MSC_VER
# define VMPI_snprintf sprintf_s
#else
# define VMPI_snprintf snprintf
#endif
#define VMPI_vfprintf vfprintf #define VMPI_vfprintf vfprintf
#define VMPI_memset memset #define VMPI_memset memset
#define VMPI_isdigit isdigit #define VMPI_isdigit isdigit
#define VMPI_getDate() #define VMPI_getDate()
extern void VMPI_setPageProtection(void *address, extern void VMPI_setPageProtection(void *address,
size_t size, size_t size,
bool executableFlag, bool executableFlag,
bool writeableFlag); bool writeableFlag);
 End of changes. 1 change blocks. 
0 lines changed or deleted 5 lines changed or added


 avmplus.h   avmplus.h 
skipping to change at line 93 skipping to change at line 93
#endif #endif
#define NJ_VERBOSE 1 #define NJ_VERBOSE 1
#define NJ_PROFILE 1 #define NJ_PROFILE 1
#include <stdarg.h> #include <stdarg.h>
#endif #endif
#ifdef _DEBUG #ifdef _DEBUG
void NanoAssertFail(); void NanoAssertFail();
#endif #endif
#define AvmAssert(x) assert(x)
#define AvmAssertMsg(x, y)
#define AvmDebugLog(x) printf x
#if defined(AVMPLUS_IA32) #if defined(AVMPLUS_IA32)
#if defined(_MSC_VER) #if defined(_MSC_VER)
# define AVMPLUS_HAS_RDTSC 1
__declspec(naked) static inline __int64 rdtsc() __declspec(naked) static inline __int64 rdtsc()
{ {
__asm __asm
{ {
rdtsc; rdtsc;
ret; ret;
} }
} }
#elif defined(SOLARIS) #elif defined(SOLARIS)
# define AVMPLUS_HAS_RDTSC 1
static inline unsigned long long rdtsc(void) static inline unsigned long long rdtsc(void)
{ {
unsigned long long int x; unsigned long long int x;
asm volatile (".byte 0x0f, 0x31" : "=A" (x)); asm volatile (".byte 0x0f, 0x31" : "=A" (x));
return x; return x;
} }
#elif defined(__i386__) #elif defined(__i386__)
# define AVMPLUS_HAS_RDTSC 1
static __inline__ unsigned long long rdtsc(void) static __inline__ unsigned long long rdtsc(void)
{ {
unsigned long long int x; unsigned long long int x;
__asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
return x; return x;
} }
#endif /* compilers */ #endif /* compilers */
#elif defined(__x86_64__) #elif defined(__x86_64__)
# define AVMPLUS_HAS_RDTSC 1
static __inline__ uint64_t rdtsc(void) static __inline__ uint64_t rdtsc(void)
{ {
unsigned hi, lo; unsigned hi, lo;
__asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 ); return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
} }
#elif defined(_MSC_VER) && defined(_M_AMD64) #elif defined(_MSC_VER) && defined(_M_AMD64)
# define AVMPLUS_HAS_RDTSC 1
#include <intrin.h> #include <intrin.h>
#pragma intrinsic(__rdtsc) #pragma intrinsic(__rdtsc)
static inline unsigned __int64 rdtsc(void) static inline unsigned __int64 rdtsc(void)
{ {
return __rdtsc(); return __rdtsc();
} }
#elif defined(__powerpc__) #elif defined(__powerpc__)
# define AVMPLUS_HAS_RDTSC 1
typedef unsigned long long int unsigned long long; typedef unsigned long long int unsigned long long;
static __inline__ unsigned long long rdtsc(void) static __inline__ unsigned long long rdtsc(void)
{ {
unsigned long long int result=0; unsigned long long int result=0;
unsigned long int upper, lower,tmp; unsigned long int upper, lower,tmp;
__asm__ volatile( __asm__ volatile(
"0: \n" "0: \n"
"\tmftbu %0 \n" "\tmftbu %0 \n"
"\tmftb %1 \n" "\tmftb %1 \n"
skipping to change at line 168 skipping to change at line 182
); );
result = upper; result = upper;
result = result<<32; result = result<<32;
result = result|lower; result = result|lower;
return(result); return(result);
} }
#endif /* architecture */ #endif /* architecture */
#ifndef AVMPLUS_HAS_RDTSC
# define AVMPLUS_HAS_RDTSC 0
#endif
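A minimal, hypothetical use of the fallback guard added above (the workload function is assumed):

#if AVMPLUS_HAS_RDTSC
    static inline uint64_t cyclesFor(void (*workload)())
    {
        uint64_t start = rdtsc();
        workload();
        return rdtsc() - start;    // elapsed time-stamp-counter ticks
    }
#endif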
struct JSContext; struct JSContext;
#ifdef PERFM #ifdef PERFM
# define PERFM_NVPROF(n,v) _nvprof(n,v) # define PERFM_NVPROF(n,v) _nvprof(n,v)
# define PERFM_NTPROF(n) _ntprof(n) # define PERFM_NTPROF(n) _ntprof(n)
# define PERFM_TPROF_END() _tprof_end() # define PERFM_TPROF_END() _tprof_end()
#else #else
# define PERFM_NVPROF(n,v) # define PERFM_NVPROF(n,v)
# define PERFM_NTPROF(n) # define PERFM_NTPROF(n)
# define PERFM_TPROF_END() # define PERFM_TPROF_END()
skipping to change at line 253 skipping to change at line 271
return true; return true;
#endif #endif
} }
}; };
/** /**
     * Bit vectors are an efficient method of keeping True/False information * Bit vectors are an efficient method of keeping True/False information
* on a set of items or conditions. Class BitSet provides functions * on a set of items or conditions. Class BitSet provides functions
* to manipulate individual bits in the vector. * to manipulate individual bits in the vector.
* *
* Since most vectors are rather small an array of longs is used by
* default to house the value of the bits. If more bits are needed
* then an array is allocated dynamically outside of this object.
*
* This object is not optimized for a fixed sized bit vector * This object is not optimized for a fixed sized bit vector
* it instead allows for dynamically growing the bit vector. * it instead allows for dynamically growing the bit vector.
*/ */
class BitSet class BitSet
{ {
public: public:
enum { kUnit = 8*sizeof(long), enum { kUnit = 8*sizeof(long),
kDefaultCapacity = 4 }; kDefaultCapacity = 4 };
BitSet() BitSet()
{ {
capacity = kDefaultCapacity; capacity = kDefaultCapacity;
ar = (long*)calloc(capacity, sizeof(long));
reset(); reset();
} }
~BitSet() ~BitSet()
{ {
if (capacity > kDefaultCapacity) free(ar);
free(bits.ptr);
} }
void reset() void reset()
{ {
if (capacity > kDefaultCapacity) for (int i = 0; i < capacity; i++)
for(int i=0; i<capacity; i++) ar[i] = 0;
bits.ptr[i] = 0;
else
for(int i=0; i<capacity; i++)
bits.ar[i] = 0;
} }
void set(int bitNbr) void set(int bitNbr)
{ {
int index = bitNbr / kUnit; int index = bitNbr / kUnit;
int bit = bitNbr % kUnit; int bit = bitNbr % kUnit;
if (index >= capacity) if (index >= capacity)
grow(index+1); grow(index+1);
if (capacity > kDefaultCapacity) ar[index] |= (1<<bit);
bits.ptr[index] |= (1<<bit);
else
bits.ar[index] |= (1<<bit);
} }
void clear(int bitNbr) void clear(int bitNbr)
{ {
int index = bitNbr / kUnit; int index = bitNbr / kUnit;
int bit = bitNbr % kUnit; int bit = bitNbr % kUnit;
if (index < capacity) if (index < capacity)
{ ar[index] &= ~(1<<bit);
if (capacity > kDefaultCapacity)
bits.ptr[index] &= ~(1<<bit);
else
bits.ar[index] &= ~(1<<bit);
}
} }
bool get(int bitNbr) const bool get(int bitNbr) const
{ {
int index = bitNbr / kUnit; int index = bitNbr / kUnit;
int bit = bitNbr % kUnit; int bit = bitNbr % kUnit;
bool value = false; bool value = false;
if (index < capacity) if (index < capacity)
{ value = ( ar[index] & (1<<bit) ) ? true : false;
if (capacity > kDefaultCapacity)
                    value = ( bits.ptr[index] & (1<<bit) ) ? true : false;
                else
                    value = ( bits.ar[index] & (1<<bit) ) ? true : false;
}
return value; return value;
} }
private: private:
// Grow the array until at least newCapacity big // Grow the array until at least newCapacity big
void grow(int newCapacity) void grow(int newCapacity)
{ {
// create vector that is 2x bigger than requested // create vector that is 2x bigger than requested
newCapacity *= 2; newCapacity *= 2;
//MEMTAG("BitVector::Grow - long[]"); //MEMTAG("BitVector::Grow - long[]");
            long* newBits = (long*)calloc(1, newCapacity * sizeof(long)); long* newAr = (long*)calloc(newCapacity, sizeof(long));
//memset(newBits, 0, newCapacity * sizeof(long));
// copy the old one // copy the old one
if (capacity > kDefaultCapacity) for (int i = 0; i < capacity; i++)
for(int i=0; i<capacity; i++) newAr[i] = ar[i];
newBits[i] = bits.ptr[i];
else
for(int i=0; i<capacity; i++)
newBits[i] = bits.ar[i];
// in with the new out with the old // in with the new out with the old
if (capacity > kDefaultCapacity) free(ar);
free(bits.ptr);
bits.ptr = newBits; ar = newAr;
capacity = newCapacity; capacity = newCapacity;
} }
// by default we use the array, but if the vector
// size grows beyond kDefaultCapacity we allocate
// space dynamically.
int capacity; int capacity;
union long* ar;
{
long ar[kDefaultCapacity];
long* ptr;
}
bits;
}; };
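A small usage sketch, illustrative only (the enclosing namespace and the index values are arbitrary):

    BitSet live;
    live.set(3);              // fits in the default allocation
    live.set(1000);           // forces grow(); previously set bits are preserved
    bool a = live.get(3);     // true
    bool b = live.get(999);   // false -- never set
    live.clear(3);            // now live.get(3) is false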
} }
#endif #endif
 End of changes. 24 change blocks. 
55 lines changed or deleted 35 lines changed or added


 js-config.h   js-config.h 
skipping to change at line 52 skipping to change at line 52
#ifndef js_config_h___ #ifndef js_config_h___
#define js_config_h___ #define js_config_h___
/* Definitions set at build time that affect SpiderMonkey's public API. /* Definitions set at build time that affect SpiderMonkey's public API.
This header file is generated by the SpiderMonkey configure script, This header file is generated by the SpiderMonkey configure script,
and installed along with jsapi.h. */ and installed along with jsapi.h. */
/* Define to 1 if SpiderMonkey should support multi-threaded clients. */ /* Define to 1 if SpiderMonkey should support multi-threaded clients. */
/* #undef JS_THREADSAFE */ /* #undef JS_THREADSAFE */
/* Define to 1 if SpiderMonkey should include ctypes support. */
/* #undef JS_HAS_CTYPES */
/* Define to 1 if SpiderMonkey should support the ability to perform /* Define to 1 if SpiderMonkey should support the ability to perform
entirely too much GC. */ entirely too much GC. */
/* #undef JS_GC_ZEAL */ /* #undef JS_GC_ZEAL */
/* Define to 1 if the standard <stdint.h> header is present and /* Define to 1 if the standard <stdint.h> header is present and
useable. See jstypes.h and jsstdint.h. */ useable. See jstypes.h and jsstdint.h. */
#define JS_HAVE_STDINT_H 1 #define JS_HAVE_STDINT_H 1
/* Define to 1 if the <sys/types.h> defines int8_t, etc. */ /* Define to 1 if the <sys/types.h> defines int8_t, etc. */
/* #undef JS_SYS_TYPES_H_DEFINES_EXACT_SIZE_TYPES */ /* #undef JS_SYS_TYPES_H_DEFINES_EXACT_SIZE_TYPES */
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 jsapi.h   jsapi.h 
 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=78: * vim: set ts=8 sw=4 et tw=78:
* *
* ***** BEGIN LICENSE BLOCK ***** * ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
* *
* The contents of this file are subject to the Mozilla Public License Vers ion * The contents of this file are subject to the Mozilla Public License Vers ion
* 1.1 (the "License"); you may not use this file except in compliance with * 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at * the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/ * http://www.mozilla.org/MPL/
* *
skipping to change at line 470 skipping to change at line 470
extern JS_PUBLIC_API(JSString *) extern JS_PUBLIC_API(JSString *)
JS_ValueToString(JSContext *cx, jsval v); JS_ValueToString(JSContext *cx, jsval v);
extern JS_PUBLIC_API(JSString *) extern JS_PUBLIC_API(JSString *)
JS_ValueToSource(JSContext *cx, jsval v); JS_ValueToSource(JSContext *cx, jsval v);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp); JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
extern JS_PUBLIC_API(JSBool)
JS_DoubleIsInt32(jsdouble d, jsint *ip);
/* /*
* Convert a value to a number, then to an int32, according to the ECMA rul es * Convert a value to a number, then to an int32, according to the ECMA rul es
* for ToInt32. * for ToInt32.
*/ */
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip); JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip);
/* /*
* Convert a value to a number, then to a uint32, according to the ECMA rul es * Convert a value to a number, then to a uint32, according to the ECMA rul es
* for ToUint32. * for ToUint32.
skipping to change at line 561 skipping to change at line 564
/* Yield to pending GC operations, regardless of request depth */ /* Yield to pending GC operations, regardless of request depth */
extern JS_PUBLIC_API(void) extern JS_PUBLIC_API(void)
JS_YieldRequest(JSContext *cx); JS_YieldRequest(JSContext *cx);
extern JS_PUBLIC_API(jsrefcount) extern JS_PUBLIC_API(jsrefcount)
JS_SuspendRequest(JSContext *cx); JS_SuspendRequest(JSContext *cx);
extern JS_PUBLIC_API(void) extern JS_PUBLIC_API(void)
JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth); JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth);
extern JS_PUBLIC_API(void)
JS_TransferRequest(JSContext *cx, JSContext *another);
#ifdef __cplusplus #ifdef __cplusplus
JS_END_EXTERN_C JS_END_EXTERN_C
class JSAutoRequest { class JSAutoRequest {
public: public:
JSAutoRequest(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM) JSAutoRequest(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx), mSaveDepth(0) { : mContext(cx), mSaveDepth(0) {
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_BeginRequest(mContext); JS_BeginRequest(mContext);
} }
skipping to change at line 626 skipping to change at line 632
jsrefcount mSaveDepth; jsrefcount mSaveDepth;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
#if 0 #if 0
private: private:
static void *operator new(size_t) CPP_THROW_NEW { return 0; }; static void *operator new(size_t) CPP_THROW_NEW { return 0; };
static void operator delete(void *, size_t) { }; static void operator delete(void *, size_t) { };
#endif #endif
}; };
class JSAutoTransferRequest
{
public:
JSAutoTransferRequest(JSContext* cx1, JSContext* cx2)
: cx1(cx1), cx2(cx2) {
if(cx1 != cx2)
JS_TransferRequest(cx1, cx2);
}
~JSAutoTransferRequest() {
if(cx1 != cx2)
JS_TransferRequest(cx2, cx1);
}
private:
JSContext* const cx1;
JSContext* const cx2;
/* Not copyable. */
JSAutoTransferRequest(JSAutoTransferRequest &);
void operator =(JSAutoTransferRequest&);
};
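Illustrative use of the new helper; cx1, cx2 and the work done while the request is transferred are assumed:

    {
        JSAutoTransferRequest transfer(cx1, cx2);
        /* the calling thread's request now belongs to cx2; operate on cx2 here */
    }   /* scope exit transfers the request back to cx1 */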
JS_BEGIN_EXTERN_C JS_BEGIN_EXTERN_C
#endif #endif
extern JS_PUBLIC_API(void) extern JS_PUBLIC_API(void)
JS_Lock(JSRuntime *rt); JS_Lock(JSRuntime *rt);
extern JS_PUBLIC_API(void) extern JS_PUBLIC_API(void)
JS_Unlock(JSRuntime *rt); JS_Unlock(JSRuntime *rt);
extern JS_PUBLIC_API(JSContextCallback) extern JS_PUBLIC_API(JSContextCallback)
skipping to change at line 799 skipping to change at line 826
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
JSObject **objp); JSObject **objp);
extern JS_PUBLIC_API(JSObject *) extern JS_PUBLIC_API(JSObject *)
JS_GetScopeChain(JSContext *cx); JS_GetScopeChain(JSContext *cx);
extern JS_PUBLIC_API(JSObject *) extern JS_PUBLIC_API(JSObject *)
JS_GetGlobalForObject(JSContext *cx, JSObject *obj); JS_GetGlobalForObject(JSContext *cx, JSObject *obj);
#ifdef JS_HAS_CTYPES
/*
* Initialize the 'ctypes' object on a global variable 'obj'. The 'ctypes'
* object will be sealed.
*/
extern JS_PUBLIC_API(JSBool)
JS_InitCTypesClass(JSContext *cx, JSObject *global);
#endif
/* /*
* Macros to hide interpreter stack layout details from a JSFastNative usin g * Macros to hide interpreter stack layout details from a JSFastNative usin g
* its jsval *vp parameter. The stack layout underlying invocation can't ch ange * its jsval *vp parameter. The stack layout underlying invocation can't ch ange
* without breaking source and binary compatibility (argv[-2] is well-known to * without breaking source and binary compatibility (argv[-2] is well-known to
* be the callee jsval, and argv[-1] is as well known to be |this|). * be the callee jsval, and argv[-1] is as well known to be |this|).
* *
* Note well: However, argv[-1] may be JSVAL_NULL where with slow natives i t * Note well: However, argv[-1] may be JSVAL_NULL where with slow natives i t
* is the global object, so embeddings implementing fast natives *must* cal l * is the global object, so embeddings implementing fast natives *must* cal l
* JS_THIS or JS_THIS_OBJECT and test for failure indicated by a null retur n, * JS_THIS or JS_THIS_OBJECT and test for failure indicated by a null retur n,
* which should propagate as a false return from native functions and hooks . * which should propagate as a false return from native functions and hooks .
skipping to change at line 1682 skipping to change at line 1718
extern JS_PUBLIC_API(JSObject *) extern JS_PUBLIC_API(JSObject *)
JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto, JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
JSObject *parent); JSObject *parent);
extern JS_PUBLIC_API(JSObject *) extern JS_PUBLIC_API(JSObject *)
JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *pr oto, JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *pr oto,
JSObject *parent, uintN argc, jsval *argv); JSObject *parent, uintN argc, jsval *argv);
extern JS_PUBLIC_API(JSObject *) extern JS_PUBLIC_API(JSObject *)
JS_New(JSContext *cx, JSObject *ctor, uintN argc, jsval *argv);
extern JS_PUBLIC_API(JSObject *)
JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *cl asp, JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *cl asp,
JSObject *proto, uintN attrs); JSObject *proto, uintN attrs);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds) ; JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds) ;
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps); JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval val ue, JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval val ue,
JSPropertyOp getter, JSPropertyOp setter, uintN attrs); JSPropertyOp getter, JSPropertyOp setter, uintN attrs);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value, JS_DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value,
JSPropertyOp getter, JSPropertyOp setter, uintN attrs ); JSPropertyOp getter, JSPropertyOp setter, uintN attrs );
extern JS_PUBLIC_API(JSBool)
JS_DefineOwnProperty(JSContext *cx, JSObject *obj, jsid id, jsval descriptor, JSBool *bp);
/* /*
* Determine the attributes (JSPROP_* flags) of a property on a given objec t. * Determine the attributes (JSPROP_* flags) of a property on a given objec t.
* *
* If the object does not have a property by that name, *foundp will be * If the object does not have a property by that name, *foundp will be
* JS_FALSE and the value of *attrsp is undefined. * JS_FALSE and the value of *attrsp is undefined.
*/ */
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name, JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
uintN *attrsp, JSBool *foundp); uintN *attrsp, JSBool *foundp);
skipping to change at line 1794 skipping to change at line 1836
/* /*
* Like JS_GetPropertyAttrsGetterAndSetterById but will return a property o n * Like JS_GetPropertyAttrsGetterAndSetterById but will return a property o n
* an object on the prototype chain (returned in objp). If data->obj is nul l, * an object on the prototype chain (returned in objp). If data->obj is nul l,
* then this property was not found on the prototype chain. * then this property was not found on the prototype chain.
*/ */
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetPropertyDescriptorById(JSContext *cx, JSObject *obj, jsid id, uintN f lags, JS_GetPropertyDescriptorById(JSContext *cx, JSObject *obj, jsid id, uintN f lags,
JSPropertyDescriptor *desc); JSPropertyDescriptor *desc);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
extern JS_PUBLIC_API(JSBool)
JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp); JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetPropertyById(JSContext *cx, JSObject *obj, jsid id, jsval *vp); JS_GetPropertyById(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp, JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
jsval *vp); jsval *vp);
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
skipping to change at line 2226 skipping to change at line 2271
*/ */
#define JS_DONT_PRETTY_PRINT ((uintN)0x8000) #define JS_DONT_PRETTY_PRINT ((uintN)0x8000)
extern JS_PUBLIC_API(JSString *) extern JS_PUBLIC_API(JSString *)
JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent); JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent);
extern JS_PUBLIC_API(JSString *) extern JS_PUBLIC_API(JSString *)
JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent); JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent);
/* /*
 * NB: JS_ExecuteScript, JS_ExecuteScriptPart, and the JS_Evaluate*Script* * NB: JS_ExecuteScript and the JS_Evaluate*Script* quadruplets use the obj
 * quadruplets all use the obj parameter as the initial scope chain header, * parameter as the initial scope chain header, the 'this' keyword value, and
 * the 'this' keyword value, and the variables object (ECMA parlance for where * the variables object (ECMA parlance for where 'var' and 'function' bind
 * 'var' and 'function' bind names) of the execution context for script. * names) of the execution context for script.
* *
* Using obj as the variables object is problematic if obj's parent (which is * Using obj as the variables object is problematic if obj's parent (which is
* the scope chain link; see JS_SetParent and JS_NewObject) is not null: in * the scope chain link; see JS_SetParent and JS_NewObject) is not null: in
* this case, variables created by 'var x = 0', e.g., go in obj, but variab les * this case, variables created by 'var x = 0', e.g., go in obj, but variab les
* created by assignment to an unbound id, 'x = 0', go in the last object o n * created by assignment to an unbound id, 'x = 0', go in the last object o n
* the scope chain linked by parent. * the scope chain linked by parent.
* *
* ECMA calls that last scoping object the "global object", but note that m any * ECMA calls that last scoping object the "global object", but note that m any
* embeddings have several such objects. ECMA requires that "global code" be * embeddings have several such objects. ECMA requires that "global code" be
* executed with the variables object equal to this global object. But the se * executed with the variables object equal to this global object. But the se
skipping to change at line 2270 skipping to change at line 2315
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rva l); JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rva l);
/* /*
* Execute either the function-defining prolog of a script, or the script's * Execute either the function-defining prolog of a script, or the script's
* main body, but not both. * main body, but not both.
*/ */
typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart; typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart;
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script,
JSExecPart part, jsval *rval);
extern JS_PUBLIC_API(JSBool)
JS_EvaluateScript(JSContext *cx, JSObject *obj, JS_EvaluateScript(JSContext *cx, JSObject *obj,
const char *bytes, uintN length, const char *bytes, uintN length,
const char *filename, uintN lineno, const char *filename, uintN lineno,
jsval *rval); jsval *rval);
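A minimal, hypothetical call; the context 'cx' and a global object set up by the embedding are assumed, and error reporting is omitted:

    jsval rval;
    static const char src[] = "6 * 7";
    JSBool ok = JS_EvaluateScript(cx, global, src, sizeof(src) - 1,
                                  __FILE__, __LINE__, &rval);
    /* on success, rval holds the script's completion value -- here the number 42 */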
extern JS_PUBLIC_API(JSBool) extern JS_PUBLIC_API(JSBool)
JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj, JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj,
JSPrincipals *principals, JSPrincipals *principals,
const char *bytes, uintN length, const char *bytes, uintN length,
const char *filename, uintN lineno, const char *filename, uintN lineno,
 End of changes. 10 change blocks. 
10 lines changed or deleted 53 lines changed or added


 jsarray.h   jsarray.h 
skipping to change at line 70 skipping to change at line 70
{ {
return getClass() == &js_ArrayClass; return getClass() == &js_ArrayClass;
} }
inline bool inline bool
JSObject::isArray() const JSObject::isArray() const
{ {
return isDenseArray() || getClass() == &js_SlowArrayClass; return isDenseArray() || getClass() == &js_SlowArrayClass;
} }
#define OBJ_IS_DENSE_ARRAY(cx,obj) (obj)->isDenseArray()
#define OBJ_IS_ARRAY(cx,obj) (obj)->isArray()
/* /*
 * Dense arrays are not native (OBJ_IS_NATIVE(cx, aobj) for a dense array aobj * Dense arrays are not native -- aobj->isNative() for a dense array aobj
 * results in false, meaning aobj->map does not point to a JSScope). * results in false, meaning aobj->map does not point to a JSScope.
* *
* But Array methods are called via aobj.sort(), e.g., and the interpreter and * But Array methods are called via aobj.sort(), e.g., and the interpreter and
* the trace recorder must consult the property cache in order to perform w ell. * the trace recorder must consult the property cache in order to perform w ell.
* The cache works only for native objects. * The cache works only for native objects.
* *
* Therefore the interpreter (js_Interpret in JSOP_GETPROP and JSOP_CALLPRO P) * Therefore the interpreter (js_Interpret in JSOP_GETPROP and JSOP_CALLPRO P)
* and js_GetPropertyHelper use this inline function to skip up one link in the * and js_GetPropertyHelper use this inline function to skip up one link in the
* prototype chain when obj is a dense array, in order to find a native obj ect * prototype chain when obj is a dense array, in order to find a native obj ect
* (to wit, Array.prototype) in which to probe for cached methods. * (to wit, Array.prototype) in which to probe for cached methods.
* *
* Note that setting aobj.__proto__ for a dense array aobj turns aobj into a * Note that setting aobj.__proto__ for a dense array aobj turns aobj into a
 * slow array, avoiding the need to skip. * slow array, avoiding the need to skip.
* *
* Callers of js_GetProtoIfDenseArray must take care to use the original ob ject * Callers of js_GetProtoIfDenseArray must take care to use the original ob ject
* (obj) for the |this| value of a getter, setter, or method call (bug 4764 47). * (obj) for the |this| value of a getter, setter, or method call (bug 4764 47).
*/ */
static JS_INLINE JSObject * static JS_INLINE JSObject *
js_GetProtoIfDenseArray(JSContext *cx, JSObject *obj) js_GetProtoIfDenseArray(JSObject *obj)
{ {
return OBJ_IS_DENSE_ARRAY(cx, obj) ? OBJ_GET_PROTO(cx, obj) : obj; return obj->isDenseArray() ? obj->getProto() : obj;
} }
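For illustration only ('cx', 'aobj' and the cache-probe step are assumed): a caller that wants a cached Array method probes the prototype but keeps the original object as |this|:

    JSObject *pobj = js_GetProtoIfDenseArray(aobj);  /* Array.prototype when aobj is dense */
    /* probe the property cache on pobj, then invoke with aobj as |this| (bug 476447) */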
extern JSObject * extern JSObject *
js_InitArrayClass(JSContext *cx, JSObject *obj); js_InitArrayClass(JSContext *cx, JSObject *obj);
extern bool extern bool
js_InitContextBusyArrayTable(JSContext *cx); js_InitContextBusyArrayTable(JSContext *cx);
/* /*
* Creates a new array with the given length and proto (NB: NULL is not * Creates a new array with the given length and proto (NB: NULL is not
* translated to Array.prototype), with len slots preallocated. * translated to Array.prototype), with len slots preallocated.
*/ */
extern JSObject * JS_FASTCALL extern JSObject * JS_FASTCALL
js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len); js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len);
extern JSObject * extern JSObject *
js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector, js_NewArrayObject(JSContext *cx, jsuint length, const jsval *vector, bool holey = false);
                  JSBool holey = JS_FALSE);
/* Create an array object that starts out already made slow/sparse. */ /* Create an array object that starts out already made slow/sparse. */
extern JSObject * extern JSObject *
js_NewSlowArrayObject(JSContext *cx); js_NewSlowArrayObject(JSContext *cx);
extern JSBool extern JSBool
js_MakeArraySlow(JSContext *cx, JSObject *obj); js_MakeArraySlow(JSContext *cx, JSObject *obj);
#define JSSLOT_ARRAY_LENGTH JSSLOT_PRIVATE #define JSSLOT_ARRAY_LENGTH JSSLOT_PRIVATE
#define JSSLOT_ARRAY_COUNT (JSSLOT_ARRAY_LENGTH + 1) #define JSSLOT_ARRAY_COUNT (JSSLOT_ARRAY_LENGTH + 1)
 End of changes. 5 change blocks. 
10 lines changed or deleted 6 lines changed or added


 jsatom.h   jsatom.h 
skipping to change at line 90 skipping to change at line 90
* longer if cx's string newborn root is not overwritten. * longer if cx's string newborn root is not overwritten.
*/ */
extern const char * extern const char *
js_AtomToPrintableString(JSContext *cx, JSAtom *atom); js_AtomToPrintableString(JSContext *cx, JSAtom *atom);
struct JSAtomListElement { struct JSAtomListElement {
JSHashEntry entry; JSHashEntry entry;
}; };
#define ALE_ATOM(ale) ((JSAtom *) (ale)->entry.key) #define ALE_ATOM(ale) ((JSAtom *) (ale)->entry.key)
#define ALE_INDEX(ale) ((jsatomid) JS_PTR_TO_UINT32((ale)->entry.value)) #define ALE_INDEX(ale) (jsatomid(uintptr_t((ale)->entry.value)))
#define ALE_VALUE(ale) ((jsval) (ale)->entry.value) #define ALE_VALUE(ale) ((jsval) (ale)->entry.value)
#define ALE_NEXT(ale) ((JSAtomListElement *) (ale)->entry.next) #define ALE_NEXT(ale) ((JSAtomListElement *) (ale)->entry.next)
/* /*
* In an upvars list, ALE_DEFN(ale)->resolve() is the outermost definition the * In an upvars list, ALE_DEFN(ale)->resolve() is the outermost definition the
* name may reference. If a with block or a function that calls eval enclos es * name may reference. If a with block or a function that calls eval enclos es
* the use, the name may end up referring to something else at runtime. * the use, the name may end up referring to something else at runtime.
*/ */
#define ALE_DEFN(ale) ((JSDefinition *) (ale)->entry.value) #define ALE_DEFN(ale) ((JSDefinition *) (ale)->entry.value)
#define ALE_SET_ATOM(ale,atom) ((ale)->entry.key = (const void *)(atom)) #define ALE_SET_ATOM(ale,atom) ((ale)->entry.key = (const void *)(atom))
#define ALE_SET_INDEX(ale,index)((ale)->entry.value = JS_UINT32_TO_PTR(inde x)) #define ALE_SET_INDEX(ale,index)((ale)->entry.value = (void *)(index))
#define ALE_SET_DEFN(ale, dn) ((ale)->entry.value = (void *)(dn)) #define ALE_SET_DEFN(ale, dn) ((ale)->entry.value = (void *)(dn))
#define ALE_SET_VALUE(ale, v) ((ale)->entry.value = (void *)(v)) #define ALE_SET_VALUE(ale, v) ((ale)->entry.value = (void *)(v))
#define ALE_SET_NEXT(ale,nxt) ((ale)->entry.next = (JSHashEntry *)(nxt)) #define ALE_SET_NEXT(ale,nxt) ((ale)->entry.next = (JSHashEntry *)(nxt))
/* /*
* NB: JSAtomSet must be plain-old-data as it is embedded in the pn_u union in * NB: JSAtomSet must be plain-old-data as it is embedded in the pn_u union in
* JSParseNode. JSAtomList encapsulates all operational uses of a JSAtomSet . * JSParseNode. JSAtomList encapsulates all operational uses of a JSAtomSet .
* *
* The JSAtomList name is traditional, even though the implementation is a map * The JSAtomList name is traditional, even though the implementation is a map
 * (not to be confused with JSAtomMap). In particular the "ALE" and "ale" short      * (not to be confused with JSAtomMap). In particular the "ALE" and "ale" short
skipping to change at line 249 skipping to change at line 249
/* Various built-in or commonly-used atoms, pinned on first context. */ /* Various built-in or commonly-used atoms, pinned on first context. */
JSAtom *anonymousAtom; JSAtom *anonymousAtom;
JSAtom *applyAtom; JSAtom *applyAtom;
JSAtom *argumentsAtom; JSAtom *argumentsAtom;
JSAtom *arityAtom; JSAtom *arityAtom;
JSAtom *callAtom; JSAtom *callAtom;
JSAtom *calleeAtom; JSAtom *calleeAtom;
JSAtom *callerAtom; JSAtom *callerAtom;
JSAtom *classPrototypeAtom; JSAtom *classPrototypeAtom;
JSAtom *constructorAtom; JSAtom *constructorAtom;
JSAtom *countAtom;
JSAtom *eachAtom; JSAtom *eachAtom;
JSAtom *evalAtom; JSAtom *evalAtom;
JSAtom *fileNameAtom; JSAtom *fileNameAtom;
JSAtom *getAtom; JSAtom *getAtom;
JSAtom *getterAtom; JSAtom *getterAtom;
JSAtom *indexAtom; JSAtom *indexAtom;
JSAtom *inputAtom; JSAtom *inputAtom;
JSAtom *iteratorAtom; JSAtom *iteratorAtom;
JSAtom *lengthAtom; JSAtom *lengthAtom;
JSAtom *lineNumberAtom; JSAtom *lineNumberAtom;
End of changes. 3 change blocks.
3 lines changed or deleted      2 lines changed or added


 jsautocfg.h    jsautocfg.h
#ifndef js_cpucfg___ #ifndef js_cpucfg___
#define js_cpucfg___ #define js_cpucfg___
/* AUTOMATICALLY GENERATED - DO NOT EDIT */ /* AUTOMATICALLY GENERATED - DO NOT EDIT */
#define IS_LITTLE_ENDIAN 1 #define IS_LITTLE_ENDIAN 1
#undef IS_BIG_ENDIAN #undef IS_BIG_ENDIAN
#define JS_STACK_GROWTH_DIRECTION (-1) #ifdef __hppa
# define JS_STACK_GROWTH_DIRECTION (1)
#else
# define JS_STACK_GROWTH_DIRECTION (-1)
#endif
#endif /* js_cpucfg___ */ #endif /* js_cpucfg___ */
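[Editorial note] The sign of JS_STACK_GROWTH_DIRECTION is what stack-limit checks branch on, which is why HP PA-RISC (__hppa), whose native stack grows upward, needs +1. The macro below is a hypothetical sketch of that pattern, not code from the diffed file.

#if JS_STACK_GROWTH_DIRECTION > 0
# define SAMPLE_STACK_OVERFLOWED(sp, limit)  ((jsuword)(sp) > (jsuword)(limit))
#else
# define SAMPLE_STACK_OVERFLOWED(sp, limit)  ((jsuword)(sp) < (jsuword)(limit))
#endif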
End of changes. 1 change blocks.
1 lines changed or deleted      5 lines changed or added


 jsbuiltins.h    jsbuiltins.h
skipping to change at line 119 skipping to change at line 119
*/ */
#define JSVAL_ERROR_COOKIE OBJECT_TO_JSVAL((JSObject*)0x10) #define JSVAL_ERROR_COOKIE OBJECT_TO_JSVAL((JSObject*)0x10)
/* Macros used by JS_DEFINE_CALLINFOn. */ /* Macros used by JS_DEFINE_CALLINFOn. */
#ifdef DEBUG #ifdef DEBUG
#define _JS_CI_NAME(op) ,#op #define _JS_CI_NAME(op) ,#op
#else #else
#define _JS_CI_NAME(op) #define _JS_CI_NAME(op)
#endif #endif
#define _JS_I32_ARGSIZE nanojit::ARGSIZE_I #define _JS_I32_ARGTYPE nanojit::ARGTYPE_I
#define _JS_I32_RETSIZE nanojit::ARGSIZE_I #define _JS_I32_RETTYPE nanojit::ARGTYPE_I
#define _JS_F64_ARGSIZE nanojit::ARGSIZE_F #define _JS_F64_ARGTYPE nanojit::ARGTYPE_F
#define _JS_F64_RETSIZE nanojit::ARGSIZE_F #define _JS_F64_RETTYPE nanojit::ARGTYPE_F
#define _JS_PTR_ARGSIZE nanojit::ARGSIZE_P #define _JS_PTR_ARGTYPE nanojit::ARGTYPE_P
#define _JS_PTR_RETSIZE nanojit::ARGSIZE_P #define _JS_PTR_RETTYPE nanojit::ARGTYPE_P
struct ClosureVarInfo; struct ClosureVarInfo;
/* /*
* Supported types for builtin functions. * Supported types for builtin functions.
* *
 * Types with -- for the two string fields are not permitted as argument types      * Types with -- for the two string fields are not permitted as argument types
* in JS_DEFINE_TRCINFO. * in JS_DEFINE_TRCINFO.
* *
* There are three kinds of traceable-native error handling. * There are three kinds of traceable-native error handling.
skipping to change at line 236 skipping to change at line 236
#define _JS_CTYPE_DOUBLEPTR _JS_CTYPE(double *, _JS_P TR, --, --, INFALLIBLE) #define _JS_CTYPE_DOUBLEPTR _JS_CTYPE(double *, _JS_P TR, --, --, INFALLIBLE)
#define _JS_CTYPE_CHARPTR _JS_CTYPE(char *, _JS_P TR, --, --, INFALLIBLE) #define _JS_CTYPE_CHARPTR _JS_CTYPE(char *, _JS_P TR, --, --, INFALLIBLE)
#define _JS_CTYPE_APNPTR _JS_CTYPE(ArgsPrivateNative *, _JS_P TR, --, --, INFALLIBLE) #define _JS_CTYPE_APNPTR _JS_CTYPE(ArgsPrivateNative *, _JS_P TR, --, --, INFALLIBLE)
#define _JS_CTYPE_CVIPTR _JS_CTYPE(const ClosureVarInfo *, _JS_P TR, --, --, INFALLIBLE) #define _JS_CTYPE_CVIPTR _JS_CTYPE(const ClosureVarInfo *, _JS_P TR, --, --, INFALLIBLE)
#define _JS_CTYPE_FRAMEINFO _JS_CTYPE(FrameInfo *, _JS_P TR, --, --, INFALLIBLE) #define _JS_CTYPE_FRAMEINFO _JS_CTYPE(FrameInfo *, _JS_P TR, --, --, INFALLIBLE)
#define _JS_EXPAND(tokens) tokens #define _JS_EXPAND(tokens) tokens
#define _JS_CTYPE_TYPE2(t,s,p,a,f) t #define _JS_CTYPE_TYPE2(t,s,p,a,f) t
#define _JS_CTYPE_TYPE(tyname) _JS_EXPAND(_JS_CTYPE_TYPE2 _JS_C TYPE_##tyname) #define _JS_CTYPE_TYPE(tyname) _JS_EXPAND(_JS_CTYPE_TYPE2 _JS_C TYPE_##tyname)
#define _JS_CTYPE_RETSIZE2(t,s,p,a,f)   s##_RETSIZE      #define _JS_CTYPE_RETTYPE2(t,s,p,a,f)   s##_RETTYPE
#define _JS_CTYPE_RETSIZE(tyname)       _JS_EXPAND(_JS_CTYPE_RETSIZE2 _JS_CTYPE_##tyname)      #define _JS_CTYPE_RETTYPE(tyname)       _JS_EXPAND(_JS_CTYPE_RETTYPE2 _JS_CTYPE_##tyname)
#define _JS_CTYPE_ARGSIZE2(t,s,p,a,f)   s##_ARGSIZE      #define _JS_CTYPE_ARGTYPE2(t,s,p,a,f)   s##_ARGTYPE
#define _JS_CTYPE_ARGSIZE(tyname)       _JS_EXPAND(_JS_CTYPE_ARGSIZE2 _JS_CTYPE_##tyname)      #define _JS_CTYPE_ARGTYPE(tyname)       _JS_EXPAND(_JS_CTYPE_ARGTYPE2 _JS_CTYPE_##tyname)
#define _JS_CTYPE_PCH2(t,s,p,a,f) p #define _JS_CTYPE_PCH2(t,s,p,a,f) p
#define _JS_CTYPE_PCH(tyname) _JS_EXPAND(_JS_CTYPE_PCH2 _JS_C TYPE_##tyname) #define _JS_CTYPE_PCH(tyname) _JS_EXPAND(_JS_CTYPE_PCH2 _JS_C TYPE_##tyname)
#define _JS_CTYPE_ACH2(t,s,p,a,f) a #define _JS_CTYPE_ACH2(t,s,p,a,f) a
#define _JS_CTYPE_ACH(tyname) _JS_EXPAND(_JS_CTYPE_ACH2 _JS_C TYPE_##tyname) #define _JS_CTYPE_ACH(tyname) _JS_EXPAND(_JS_CTYPE_ACH2 _JS_C TYPE_##tyname)
#define _JS_CTYPE_FLAGS2(t,s,p,a,f) f #define _JS_CTYPE_FLAGS2(t,s,p,a,f) f
#define _JS_CTYPE_FLAGS(tyname) _JS_EXPAND(_JS_CTYPE_FLAGS2 _JS_C TYPE_##tyname) #define _JS_CTYPE_FLAGS(tyname) _JS_EXPAND(_JS_CTYPE_FLAGS2 _JS_C TYPE_##tyname)
#define _JS_static_TN(t) static t #define _JS_static_TN(t) static t
#define _JS_static_CI static #define _JS_static_CI static
#define _JS_extern_TN(t) extern t #define _JS_extern_TN(t) extern t
#define _JS_extern_CI #define _JS_extern_CI
#define _JS_FRIEND_TN(t) extern JS_FRIEND_API(t) #define _JS_FRIEND_TN(t) extern JS_FRIEND_API(t)
#define _JS_FRIEND_CI #define _JS_FRIEND_CI
#define _JS_TN_LINKAGE(linkage, t) _JS_##linkage##_TN(t) #define _JS_TN_LINKAGE(linkage, t) _JS_##linkage##_TN(t)
#define _JS_CI_LINKAGE(linkage) _JS_##linkage##_CI #define _JS_CI_LINKAGE(linkage) _JS_##linkage##_CI
#define _JS_CALLINFO(name) name##_ci #define _JS_CALLINFO(name) name##_ci
#if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32) #if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32)
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, cse, fold) \      #define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, isPure, storeAccSet) \
    _JS_TN_LINKAGE(linkage, crtype) name cargtypes; \      _JS_TN_LINKAGE(linkage, crtype) name cargtypes; \
    _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \      _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \
        { (intptr_t) &name, argtypes, cse, fold, nanojit::ABI_CDECL _JS_CI_NAME(name) };      { (intptr_t) &name, argtypes, nanojit::ABI_CDECL, isPure, storeAccSet _JS_CI_NAME(name) };\
                                                                                               JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE);
#else      #else
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, cse, fold) \      #define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, isPure, storeAccSet) \
    _JS_TN_LINKAGE(linkage, crtype) FASTCALL name cargtypes; \      _JS_TN_LINKAGE(linkage, crtype) FASTCALL name cargtypes; \
    _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \      _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \
        { (intptr_t) &name, argtypes, cse, fold, nanojit::ABI_FASTCALL _JS_CI_NAME(name) };      { (intptr_t) &name, argtypes, nanojit::ABI_FASTCALL, isPure, storeAccSet _JS_CI_NAME(name) }; \
                                                                                                 JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE);
#endif #endif
/* /*
* This macro is used for builtin functions that can be called from JITted * This macro is used for builtin functions that can be called from JITted
* code. It declares a C function named <op> and a CallInfo struct named * code. It declares a C function named <op> and a CallInfo struct named
* <op>_ci so the tracer can call it. The <N> in JS_DEFINE_CALLINFO_<N> is * <op>_ci so the tracer can call it. The <N> in JS_DEFINE_CALLINFO_<N> is
* the number of arguments the builtin takes. Builtins with no arguments * the number of arguments the builtin takes. Builtins with no arguments
* are not supported. Using a macro is clunky but ensures that the types * are not supported. Using a macro is clunky but ensures that the types
* for each C function matches those for the corresponding CallInfo struct; * for each C function matches those for the corresponding CallInfo struct;
* mismatched types can cause subtle problems. * mismatched types can cause subtle problems.
skipping to change at line 292 skipping to change at line 295
 *   can be extern, static, or FRIEND, which specifies JS_FRIEND_API linkage      *   can be extern, static, or FRIEND, which specifies JS_FRIEND_API linkage
* for the function. * for the function.
* *
* - The return type. This identifier must name one of the _JS_TYPEINFO_* * - The return type. This identifier must name one of the _JS_TYPEINFO_*
* macros defined in jsbuiltins.h. * macros defined in jsbuiltins.h.
* *
* - The builtin name. * - The builtin name.
* *
* - The parameter types. * - The parameter types.
* *
 *   - The cse flag. 1 if the builtin call can be optimized away by common        *   - The isPure flag. Set to 1 if:
 *     subexpression elimination; otherwise 0. This should be 1 only if the       *     (a) the function's return value is determined solely by its arguments
 *     function is idempotent and the return value is determined solely by the    *         (ie. no hidden state, no implicit inputs used such as global
 *     arguments.                                                                  *         variables or the result of an I/O operation); and
 *                                                                                 *     (b) the function causes no observable side-effects (ie. no writes to
 *   - The fold flag. Reserved. The same as cse for now.                           *         global variables, no I/O output).
                                                                                    *     Multiple calls to a pure function can be merged during CSE.
                                                                                    *
                                                                                    *   - The storeAccSet. This indicates which memory access regions the function
                                                                                    *     accesses. It must be ACC_NONE if the function is pure; use
                                                                                    *     ACC_STORE_ANY if you're not sure. Used to determine if each call site of
                                                                                    *     the function aliases any loads.
*/ */
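[Editorial note] For concreteness, here is a sketch of how a two-argument builtin might be registered with the new trailing arguments; it is not part of either diffed revision (js_dmod is declared further down in this header, and the real definition site may differ).

/* js_dmod is pure: its result depends only on its arguments, so isPure is 1
 * and the store access set must be nanojit::ACC_NONE. */
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_dmod, DOUBLE, DOUBLE, 1, nanojit::ACC_NONE)
/* A builtin with side effects would instead pass 0 and a real access set,
 * e.g. nanojit::ACC_STORE_ANY when the affected memory regions are unknown. */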
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold) \      #define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), (_JS_CTYPE_TYPE(at0)), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), (_JS_CTYPE_TYPE(at0)), \
        (_JS_CTYPE_ARGSIZE(at0) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), cse, fold)      _JS_CTYPE_RETTYPE(rt), \
                                               isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, cse, fold) \      #define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1)), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1)), \
        (_JS_CTYPE_ARGSIZE(at0) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), \      _JS_CTYPE_RETTYPE(rt), \
        cse, fold)      isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, cse, fold) \      #define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2)), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2)), \
        (_JS_CTYPE_ARGSIZE(at0) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), \      _JS_CTYPE_RETTYPE(rt), \
        cse, fold)      isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, cse, fold) \      #define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
         _JS_CTYPE_TYPE(at3)), \      _JS_CTYPE_TYPE(at3)), \
        (_JS_CTYPE_ARGSIZE(at0) << (4*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (4*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at3) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at3) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), \      _JS_CTYPE_RETTYPE(rt), \
        cse, fold)      isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold) \      #define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
         _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4)), \      _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4)), \
        (_JS_CTYPE_ARGSIZE(at0) << (5*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (5*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (4*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (4*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at3) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at3) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at4) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at4) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), \      _JS_CTYPE_RETTYPE(rt), \
        cse, fold)      isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold) \      #define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
         _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5)), \      _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5)), \
        (_JS_CTYPE_ARGSIZE(at0) << (6*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (6*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (5*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (5*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (4*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (4*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at3) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at3) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at4) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at4) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at5) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at5) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), cse, fold)      _JS_CTYPE_RETTYPE(rt), \
                                               isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold) \      #define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, \
                                                                                                        storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
         _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \      _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \
         _JS_CTYPE_TYPE(at6)), \      _JS_CTYPE_TYPE(at6)), \
        (_JS_CTYPE_ARGSIZE(at0) << (7*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (7*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (6*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (6*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (5*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (5*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at3) << (4*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at3) << (4*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at4) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at4) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at5) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at5) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at6) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at6) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), cse, fold)      _JS_CTYPE_RETTYPE(rt), \
                                               isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold) \      #define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, \
                                                                                                              storeAccSet) \
    _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \      _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
        (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \      (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
         _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \      _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \
         _JS_CTYPE_TYPE(at6), _JS_CTYPE_TYPE(at7)), \      _JS_CTYPE_TYPE(at6), _JS_CTYPE_TYPE(at7)), \
        (_JS_CTYPE_ARGSIZE(at0) << (8*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at0) << (8*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at1) << (7*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at1) << (7*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at2) << (6*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at2) << (6*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at3) << (5*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at3) << (5*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at4) << (4*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at4) << (4*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at5) << (3*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at5) << (3*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at6) << (2*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at6) << (2*nanojit::ARGTYPE_SHIFT)) | \
        (_JS_CTYPE_ARGSIZE(at7) << (1*nanojit::ARGSIZE_SHIFT)) | \      (_JS_CTYPE_ARGTYPE(at7) << (1*nanojit::ARGTYPE_SHIFT)) | \
        _JS_CTYPE_RETSIZE(rt), cse, fold)      _JS_CTYPE_RETTYPE(rt), \
                                               isPure, storeAccSet)
#define JS_DECLARE_CALLINFO(name)  extern const nanojit::CallInfo _JS_CALLINFO(name);      #define JS_DECLARE_CALLINFO(name)  extern const nanojit::CallInfo _JS_CALLINFO(name);
#define _JS_TN_INIT_HELPER_n(n, args) _JS_TN_INIT_HELPER_##n args #define _JS_TN_INIT_HELPER_n(n, args) _JS_TN_INIT_HELPER_##n args
#define _JS_TN_INIT_HELPER_1(linkage, rt, op, at0, cse, fold) \ #define _JS_TN_INIT_HELPER_1(linkage, rt, op, at0, isPure, storeAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_2(linkage, rt, op, at0, at1, cse, fold) \ #define _JS_TN_INIT_HELPER_2(linkage, rt, op, at0, at1, isPure, storeAccSet ) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_3(linkage, rt, op, at0, at1, at2, cse, fold) \ #define _JS_TN_INIT_HELPER_3(linkage, rt, op, at0, at1, at2, isPure, storeA ccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_4(linkage, rt, op, at0, at1, at2, at3, cse, fold ) \ #define _JS_TN_INIT_HELPER_4(linkage, rt, op, at0, at1, at2, at3, isPure, s toreAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH( at0), \ _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH( at0), \
_JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH( at0), \ _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH( at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold) \ #define _JS_TN_INIT_HELPER_5(linkage, rt, op, at0, at1, at2, at3, at4, isPu re, storeAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH( at1) \ _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH( at1) \
_JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at4) _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH( at1) \ _JS_CTYPE_ACH(at4) _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH( at1) \
_JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold) \ #define _JS_TN_INIT_HELPER_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH( at2) \ _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH( at2) \
_JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at5) _JS_CTYPE_ACH(at4) _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH( at2) \ _JS_CTYPE_ACH(at5) _JS_CTYPE_ACH(at4) _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH( at2) \
_JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold) \ #define _JS_TN_INIT_HELPER_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, storeAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH( at3) \ _JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH( at3) \
_JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \ _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at6) _JS_CTYPE_ACH(at5) _JS_CTYPE_ACH(at4) _JS_CTYPE_ACH( at3) \ _JS_CTYPE_ACH(at6) _JS_CTYPE_ACH(at5) _JS_CTYPE_ACH(at4) _JS_CTYPE_ACH( at3) \
_JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \ _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold) \ #define _JS_TN_INIT_HELPER_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, storeAccSet) \
&_JS_CALLINFO(op), \ &_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at7) _JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH( at4) \ _JS_CTYPE_PCH(at7) _JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH( at4) \
_JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE _PCH(at0), \ _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE _PCH(at0), \
_JS_CTYPE_ACH(at7) _JS_CTYPE_ACH(at6) _JS_CTYPE_ACH(at5) _JS_CTYPE_ACH( at4) \ _JS_CTYPE_ACH(at7) _JS_CTYPE_ACH(at6) _JS_CTYPE_ACH(at5) _JS_CTYPE_ACH( at4) \
_JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ ACH(at0), \ _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ ACH(at0), \
_JS_CTYPE_FLAGS(rt) _JS_CTYPE_FLAGS(rt)
#define JS_DEFINE_TRCINFO_1(name, tn0) \ #define JS_DEFINE_TRCINFO_1(name, tn0) \
_JS_DEFINE_CALLINFO_n tn0 \ _JS_DEFINE_CALLINFO_n tn0 \
JSSpecializedNative name##_sns[] = { \ JSSpecializedNative name##_sns[] = { \
skipping to change at line 483 skipping to change at line 498
{ _JS_TN_INIT_HELPER_n tn2 | JSTN_MORE }, \ { _JS_TN_INIT_HELPER_n tn2 | JSTN_MORE }, \
{ _JS_TN_INIT_HELPER_n tn3 } \ { _JS_TN_INIT_HELPER_n tn3 } \
}; \ }; \
JSNativeTraceInfo name##_trcinfo = { (JSFastNative)name, name##_sns }; JSNativeTraceInfo name##_trcinfo = { (JSFastNative)name, name##_sns };
#define _JS_DEFINE_CALLINFO_n(n, args) JS_DEFINE_CALLINFO_##n args #define _JS_DEFINE_CALLINFO_n(n, args) JS_DEFINE_CALLINFO_##n args
jsdouble FASTCALL jsdouble FASTCALL
js_StringToNumber(JSContext* cx, JSString* str); js_StringToNumber(JSContext* cx, JSString* str);
jsdouble FASTCALL
js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed);
/* Extern version of SetBuiltinError. */ /* Extern version of SetBuiltinError. */
extern JS_FRIEND_API(void) extern JS_FRIEND_API(void)
js_SetTraceableNativeFailed(JSContext *cx); js_SetTraceableNativeFailed(JSContext *cx);
extern jsdouble FASTCALL extern jsdouble FASTCALL
js_dmod(jsdouble a, jsdouble b); js_dmod(jsdouble a, jsdouble b);
#else #else
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold)      #define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, cse, fold)      #define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, cse, fold)      #define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, cse, fold)      #define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold)      #define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold)      #define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold)      #define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold)      #define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, storeAccSet)
#define JS_DECLARE_CALLINFO(name) #define JS_DECLARE_CALLINFO(name)
#define JS_DEFINE_TRCINFO_1(name, tn0) #define JS_DEFINE_TRCINFO_1(name, tn0)
#define JS_DEFINE_TRCINFO_2(name, tn0, tn1) #define JS_DEFINE_TRCINFO_2(name, tn0, tn1)
#define JS_DEFINE_TRCINFO_3(name, tn0, tn1, tn2) #define JS_DEFINE_TRCINFO_3(name, tn0, tn1, tn2)
#define JS_DEFINE_TRCINFO_4(name, tn0, tn1, tn2, tn3) #define JS_DEFINE_TRCINFO_4(name, tn0, tn1, tn2, tn3)
#endif /* !JS_TRACER */ #endif /* !JS_TRACER */
/* Defined in jsarray.cpp. */ /* Defined in jsarray.cpp. */
JS_DECLARE_CALLINFO(js_Array_dense_setelem) JS_DECLARE_CALLINFO(js_Array_dense_setelem)
skipping to change at line 537 skipping to change at line 549
JS_DECLARE_CALLINFO(js_imod) JS_DECLARE_CALLINFO(js_imod)
JS_DECLARE_CALLINFO(js_DoubleToInt32) JS_DECLARE_CALLINFO(js_DoubleToInt32)
JS_DECLARE_CALLINFO(js_DoubleToUint32) JS_DECLARE_CALLINFO(js_DoubleToUint32)
JS_DECLARE_CALLINFO(js_StringToNumber) JS_DECLARE_CALLINFO(js_StringToNumber)
JS_DECLARE_CALLINFO(js_StringToInt32) JS_DECLARE_CALLINFO(js_StringToInt32)
JS_DECLARE_CALLINFO(js_AddProperty) JS_DECLARE_CALLINFO(js_AddProperty)
JS_DECLARE_CALLINFO(js_HasNamedProperty) JS_DECLARE_CALLINFO(js_HasNamedProperty)
JS_DECLARE_CALLINFO(js_HasNamedPropertyInt32) JS_DECLARE_CALLINFO(js_HasNamedPropertyInt32)
JS_DECLARE_CALLINFO(js_TypeOfObject) JS_DECLARE_CALLINFO(js_TypeOfObject)
JS_DECLARE_CALLINFO(js_TypeOfBoolean) JS_DECLARE_CALLINFO(js_TypeOfBoolean)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToNumber) JS_DECLARE_CALLINFO(js_BooleanIntToString)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToString)
JS_DECLARE_CALLINFO(js_NewNullClosure) JS_DECLARE_CALLINFO(js_NewNullClosure)
JS_DECLARE_CALLINFO(js_PopInterpFrame) JS_DECLARE_CALLINFO(js_PopInterpFrame)
JS_DECLARE_CALLINFO(js_ConcatN) JS_DECLARE_CALLINFO(js_ConcatN)
/* Defined in jsfun.cpp. */ /* Defined in jsfun.cpp. */
JS_DECLARE_CALLINFO(js_AllocFlatClosure) JS_DECLARE_CALLINFO(js_AllocFlatClosure)
JS_DECLARE_CALLINFO(js_PutArguments) JS_DECLARE_CALLINFO(js_PutArguments)
JS_DECLARE_CALLINFO(js_PutCallObjectOnTrace) JS_DECLARE_CALLINFO(js_PutCallObjectOnTrace)
JS_DECLARE_CALLINFO(js_SetCallVar) JS_DECLARE_CALLINFO(js_SetCallVar)
JS_DECLARE_CALLINFO(js_SetCallArg) JS_DECLARE_CALLINFO(js_SetCallArg)
End of changes. 28 change blocks.
153 lines changed or deleted      173 lines changed or added


 jscntxt.h    jscntxt.h
skipping to change at line 52 skipping to change at line 52
#define jscntxt_h___ #define jscntxt_h___
/* /*
* JS execution context. * JS execution context.
*/ */
#include <string.h> #include <string.h>
#include "jsarena.h" /* Added by JSIFY */ #include "jsarena.h" /* Added by JSIFY */
#include "jsclist.h" #include "jsclist.h"
#include "jslong.h" #include "jslong.h"
#include "jsatom.h" #include "jsatom.h"
#include "jsversion.h"
#include "jsdhash.h" #include "jsdhash.h"
#include "jsdtoa.h"
#include "jsgc.h" #include "jsgc.h"
#include "jshashtable.h"
#include "jsinterp.h" #include "jsinterp.h"
#include "jsobj.h" #include "jsobj.h"
#include "jspropertycache.h"
#include "jspropertytree.h"
#include "jsprvtd.h" #include "jsprvtd.h"
#include "jspubtd.h" #include "jspubtd.h"
#include "jsregexp.h" #include "jsregexp.h"
#include "jsutil.h" #include "jsutil.h"
#include "jsarray.h" #include "jsarray.h"
#include "jstask.h" #include "jstask.h"
#include "jsvector.h" #include "jsvector.h"
#include "jshashtable.h"
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
#pragma warning(push)
#pragma warning(disable:4355) /* Silence warning about "this" used in base member initializer list */
#endif
/* /*
* js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a * js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
* given pc in a script. We use the script->code pointer to tag the cache, * given pc in a script. We use the script->code pointer to tag the cache,
 * instead of the script address itself, so that source notes are always found      * instead of the script address itself, so that source notes are always found
* by offset from the bytecode with which they were generated. * by offset from the bytecode with which they were generated.
*/ */
typedef struct JSGSNCache { typedef struct JSGSNCache {
jsbytecode *code; jsbytecode *code;
JSDHashTable table; JSDHashTable table;
skipping to change at line 128 skipping to change at line 137
class VMAllocator; class VMAllocator;
class FrameInfoCache; class FrameInfoCache;
struct REHashFn; struct REHashFn;
struct REHashKey; struct REHashKey;
struct FrameInfo; struct FrameInfo;
struct VMSideExit; struct VMSideExit;
struct TreeFragment; struct TreeFragment;
struct InterpState; struct InterpState;
template<typename T> class Queue; template<typename T> class Queue;
typedef Queue<uint16> SlotList; typedef Queue<uint16> SlotList;
struct TypeMap; class TypeMap;
struct REFragment; struct REFragment;
typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap; typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;
#if defined(JS_JIT_SPEW) || defined(DEBUG) #if defined(JS_JIT_SPEW) || defined(DEBUG)
struct FragPI; struct FragPI;
typedef nanojit::HashMap<uint32, FragPI, nanojit::DefaultHash<uint32> > Fra gStatsMap; typedef nanojit::HashMap<uint32, FragPI, nanojit::DefaultHash<uint32> > Fra gStatsMap;
#endif #endif
/* /*
* Allocation policy that calls JSContext memory functions and reports erro rs * Allocation policy that calls JSContext memory functions and reports erro rs
skipping to change at line 286 skipping to change at line 295
void resume() { void resume() {
JS_ASSERT(suspendedFrame); JS_ASSERT(suspendedFrame);
suspendedFrame = NULL; suspendedFrame = NULL;
} }
JSStackFrame *getSuspendedFrame() const { JSStackFrame *getSuspendedFrame() const {
JS_ASSERT(suspendedFrame); JS_ASSERT(suspendedFrame);
return suspendedFrame; return suspendedFrame;
} }
bool isSuspended() const { return suspendedFrame; } bool isSuspended() const { return !!suspendedFrame; }
void setPrevious(CallStack *cs) { previous = cs; } void setPrevious(CallStack *cs) { previous = cs; }
CallStack *getPrevious() const { return previous; } CallStack *getPrevious() const { return previous; }
void setInitialVarObj(JSObject *o) { initialVarObj = o; } void setInitialVarObj(JSObject *o) { initialVarObj = o; }
JSObject *getInitialVarObj() const { return initialVarObj; } JSObject *getInitialVarObj() const { return initialVarObj; }
void setInitialFrame(JSStackFrame *f) { initialFrame = f; } void setInitialFrame(JSStackFrame *f) { initialFrame = f; }
JSStackFrame *getInitialFrame() const { return initialFrame; } JSStackFrame *getInitialFrame() const { return initialFrame; }
skipping to change at line 409 skipping to change at line 418
uint32 maxCodeCacheBytes; uint32 maxCodeCacheBytes;
/* /*
* If nonzero, do not flush the JIT cache after a deep bail. That would * If nonzero, do not flush the JIT cache after a deep bail. That would
 * free JITted code pages that we will later return to. Instead, set the      * free JITted code pages that we will later return to. Instead, set the
* needFlush flag so that it can be flushed later. * needFlush flag so that it can be flushed later.
*/ */
JSBool needFlush; JSBool needFlush;
/* /*
     * reservedObjects is a linked list (via fslots[0]) of preallocated JSObjects.
* The JIT uses this to ensure that leaving a trace tree can't fail.
*/
JSBool useReservedObjects;
JSObject *reservedObjects;
/*
* Fragment map for the regular expression compiler. * Fragment map for the regular expression compiler.
*/ */
REHashMap* reFragments; REHashMap* reFragments;
// Cached temporary typemap to avoid realloc'ing every time we create o ne. // Cached temporary typemap to avoid realloc'ing every time we create o ne.
// This must be used in only one place at a given time. It must be clea red // This must be used in only one place at a given time. It must be clea red
// before use. // before use.
TypeMap* cachedTempTypeMap; TypeMap* cachedTempTypeMap;
#ifdef DEBUG #ifdef DEBUG
skipping to change at line 534 skipping to change at line 536
*/ */
bool waiveGCQuota; bool waiveGCQuota;
/* /*
 * The GSN cache is per thread since even multi-cx-per-thread embeddings      * The GSN cache is per thread since even multi-cx-per-thread embeddings
* do not interleave js_GetSrcNote calls. * do not interleave js_GetSrcNote calls.
*/ */
JSGSNCache gsnCache; JSGSNCache gsnCache;
/* Property cache for faster call/get/set invocation. */ /* Property cache for faster call/get/set invocation. */
JSPropertyCache propertyCache; js::PropertyCache propertyCache;
/* Random number generator state, used by jsmath.cpp. */
int64 rngSeed;
/* Optional stack of heap-allocated scoped local GC roots. */ /* Optional stack of heap-allocated scoped local GC roots. */
JSLocalRootStack *localRootStack; JSLocalRootStack *localRootStack;
#ifdef JS_TRACER #ifdef JS_TRACER
/* Trace-tree JIT recorder/interpreter state. */ /* Trace-tree JIT recorder/interpreter state. */
js::TraceMonitor traceMonitor; js::TraceMonitor traceMonitor;
#endif #endif
/* Lock-free hashed lists of scripts created by eval to garbage-collect . */ /* Lock-free hashed lists of scripts created by eval to garbage-collect . */
JSScript *scriptsToGC[JS_EVAL_CACHE_SIZE]; JSScript *scriptsToGC[JS_EVAL_CACHE_SIZE];
#ifdef JS_EVAL_CACHE_METERING #ifdef JS_EVAL_CACHE_METERING
JSEvalCacheMeter evalCacheMeter; JSEvalCacheMeter evalCacheMeter;
#endif #endif
/* State used by dtoa.c. */
DtoaState *dtoaState;
/* /*
 * Cache of reusable JSNativeEnumerators mapped by shape identifiers (as      * Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
 * stored in scope->shape). This cache is nulled by the GC and protected      * stored in scope->shape). This cache is nulled by the GC and protected
* by gcLock. * by gcLock.
*/ */
#define NATIVE_ENUM_CACHE_LOG2 8 #define NATIVE_ENUM_CACHE_LOG2 8
#define NATIVE_ENUM_CACHE_MASK JS_BITMASK(NATIVE_ENUM_CACHE_LOG2) #define NATIVE_ENUM_CACHE_MASK JS_BITMASK(NATIVE_ENUM_CACHE_LOG2)
#define NATIVE_ENUM_CACHE_SIZE JS_BIT(NATIVE_ENUM_CACHE_LOG2) #define NATIVE_ENUM_CACHE_SIZE JS_BIT(NATIVE_ENUM_CACHE_LOG2)
#define NATIVE_ENUM_CACHE_HASH(shape) \ #define NATIVE_ENUM_CACHE_HASH(shape) \
((((shape) >> NATIVE_ENUM_CACHE_LOG2) ^ (shape)) & NATIVE_ENUM_CACHE_MA SK) ((((shape) >> NATIVE_ENUM_CACHE_LOG2) ^ (shape)) & NATIVE_ENUM_CACHE_MA SK)
jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE]; jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE];
void init(); bool init();
void finish(); void finish();
void mark(JSTracer *trc); void mark(JSTracer *trc);
void purge(JSContext *cx); void purge(JSContext *cx);
void purgeGCFreeLists(); void purgeGCFreeLists();
}; };
#ifdef JS_THREADSAFE #ifdef JS_THREADSAFE
/* /*
* Structure uniquely representing a thread. It holds thread-private data * Structure uniquely representing a thread. It holds thread-private data
skipping to change at line 685 skipping to change at line 687
struct JSSetSlotRequest { struct JSSetSlotRequest {
JSObject *obj; /* object containing slot to set */ JSObject *obj; /* object containing slot to set */
JSObject *pobj; /* new proto or parent reference */ JSObject *pobj; /* new proto or parent reference */
uint16 slot; /* which to set, proto or parent */ uint16 slot; /* which to set, proto or parent */
JSPackedBool cycle; /* true if a cycle was detected */ JSPackedBool cycle; /* true if a cycle was detected */
JSSetSlotRequest *next; /* next request in GC worklist */ JSSetSlotRequest *next; /* next request in GC worklist */
}; };
/* Caching Class.prototype lookups for the standard classes. */ /* Caching Class.prototype lookups for the standard classes. */
struct JSClassProtoCache { struct JSClassProtoCache {
void purge() { memset(entries, 0, sizeof(entries)); } void purge() { js::PodArrayZero(entries); }
#ifdef JS_PROTO_CACHE_METERING #ifdef JS_PROTO_CACHE_METERING
struct Stats { struct Stats {
int32 probe, hit; int32 probe, hit;
}; };
# define PROTO_CACHE_METER(cx, x) \ # define PROTO_CACHE_METER(cx, x) \
((void) (JS_ATOMIC_INCREMENT(&(cx)->runtime->classProtoCacheStats.x))) ((void) (JS_ATOMIC_INCREMENT(&(cx)->runtime->classProtoCacheStats.x)))
#else #else
# define PROTO_CACHE_METER(cx, x) ((void) 0) # define PROTO_CACHE_METER(cx, x) ((void) 0)
#endif #endif
private: private:
struct GlobalAndProto { struct GlobalAndProto {
JSObject *global; JSObject *global;
JSObject *proto; JSObject *proto;
}; };
GlobalAndProto entries[JSProto_LIMIT - JSProto_Object]; GlobalAndProto entries[JSProto_LIMIT - JSProto_Object];
#ifdef __GNUC__
# pragma GCC visibility push(default)
#endif
friend JSBool js_GetClassPrototype(JSContext *cx, JSObject *scope, friend JSBool js_GetClassPrototype(JSContext *cx, JSObject *scope,
JSProtoKey protoKey, JSObject **prot op, JSProtoKey protoKey, JSObject **prot op,
JSClass *clasp); JSClass *clasp);
#ifdef __GNUC__
# pragma GCC visibility pop
#endif
}; };
struct JSRuntime { struct JSRuntime {
/* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */ /* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */
JSRuntimeState state; JSRuntimeState state;
/* Context create/destroy callback. */ /* Context create/destroy callback. */
JSContextCallback cxCallback; JSContextCallback cxCallback;
/* /*
skipping to change at line 816 skipping to change at line 824
* been poked, it won't scan for garbage. This member is protected by * been poked, it won't scan for garbage. This member is protected by
* rt->gcLock. * rt->gcLock.
*/ */
JSSetSlotRequest *setSlotRequests; JSSetSlotRequest *setSlotRequests;
/* Well-known numbers held for use by this runtime's contexts. */ /* Well-known numbers held for use by this runtime's contexts. */
jsval NaNValue; jsval NaNValue;
jsval negativeInfinityValue; jsval negativeInfinityValue;
jsval positiveInfinityValue; jsval positiveInfinityValue;
#ifdef JS_THREADSAFE js::DeflatedStringCache *deflatedStringCache;
JSLock *deflatedStringCacheLock;
#endif
JSHashTable *deflatedStringCache;
#ifdef DEBUG
uint32 deflatedStringCacheBytes;
#endif
JSString *emptyString; JSString *emptyString;
/* /*
* Builtin functions, lazily created and held for use by the trace reco rder. * Builtin functions, lazily created and held for use by the trace reco rder.
* *
 * This field would be #ifdef JS_TRACER, but XPConnect is compiled without      * This field would be #ifdef JS_TRACER, but XPConnect is compiled without
* -DJS_TRACER and includes this header. * -DJS_TRACER and includes this header.
*/ */
JSObject *builtinFunctions[JSBUILTIN_LIMIT]; JSObject *builtinFunctions[JSBUILTIN_LIMIT];
skipping to change at line 915 skipping to change at line 917
uint32 debuggerMutations; uint32 debuggerMutations;
/* /*
* Security callbacks set on the runtime are used by each context unles s * Security callbacks set on the runtime are used by each context unles s
* an override is set on the context. * an override is set on the context.
*/ */
JSSecurityCallbacks *securityCallbacks; JSSecurityCallbacks *securityCallbacks;
/* /*
* Shared scope property tree, and arena-pool for allocating its nodes. * Shared scope property tree, and arena-pool for allocating its nodes.
* This really should be free of all locking overhead and allocated in
* thread-local storage, hence the JS_PROPERTY_TREE(cx) macro.
*/
js::PropertyTree propertyTree;
#define JS_PROPERTY_TREE(cx) ((cx)->runtime->propertyTree)
/*
 * The propertyRemovals counter is incremented for every JSScope::clear,      * The propertyRemovals counter is incremented for every JSScope::clear,
 * and for each JSScope::remove method call that frees a slot in an object.      * and for each JSScope::remove method call that frees a slot in an object.
* See js_NativeGet and js_NativeSet in jsobj.c. * See js_NativeGet and js_NativeSet in jsobj.cpp.
*/ */
JSDHashTable propertyTreeHash;
JSScopeProperty *propertyFreeList;
JSArenaPool propertyArenaPool;
int32 propertyRemovals; int32 propertyRemovals;
/* Script filename table. */ /* Script filename table. */
struct JSHashTable *scriptFilenameTable; struct JSHashTable *scriptFilenameTable;
JSCList scriptFilenamePrefixes; JSCList scriptFilenamePrefixes;
#ifdef JS_THREADSAFE #ifdef JS_THREADSAFE
PRLock *scriptFilenameTableLock; PRLock *scriptFilenameTableLock;
#endif #endif
/* Number localization, used by jsnum.c */ /* Number localization, used by jsnum.c */
skipping to change at line 974 skipping to change at line 981
*/ */
volatile uint32 shapeGen; volatile uint32 shapeGen;
/* Literal table maintained by jsatom.c functions. */ /* Literal table maintained by jsatom.c functions. */
JSAtomState atomState; JSAtomState atomState;
#ifdef JS_THREADSAFE #ifdef JS_THREADSAFE
JSBackgroundThread *deallocatorThread; JSBackgroundThread *deallocatorThread;
#endif #endif
JSEmptyScope *emptyArgumentsScope;
JSEmptyScope *emptyBlockScope; JSEmptyScope *emptyBlockScope;
/* /*
* Various metering fields are defined at the end of JSRuntime. In this * Various metering fields are defined at the end of JSRuntime. In this
* way there is no need to recompile all the code that refers to other * way there is no need to recompile all the code that refers to other
* fields of JSRuntime after enabling the corresponding metering macro. * fields of JSRuntime after enabling the corresponding metering macro.
*/ */
#ifdef JS_DUMP_ENUM_CACHE_STATS #ifdef JS_DUMP_ENUM_CACHE_STATS
int32 nativeEnumProbes; int32 nativeEnumProbes;
int32 nativeEnumMisses; int32 nativeEnumMisses;
skipping to change at line 1157 skipping to change at line 1165
} JSResolvingKey; } JSResolvingKey;
typedef struct JSResolvingEntry { typedef struct JSResolvingEntry {
JSDHashEntryHdr hdr; JSDHashEntryHdr hdr;
JSResolvingKey key; JSResolvingKey key;
uint32 flags; uint32 flags;
} JSResolvingEntry; } JSResolvingEntry;
#define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */ #define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */
#define JSRESFLAG_WATCH 0x2 /* resolving id from watch */ #define JSRESFLAG_WATCH 0x2 /* resolving id from watch */
/*
 * Macros to push/pop JSTempValueRooter instances to context-linked stack of
 * temporary GC roots. If you need to protect a result value that flows out of
 * a C function across several layers of other functions, use the
 * js_LeaveLocalRootScopeWithResult internal API (see further below) instead.
 *
 * The macros also provide a simple way to get a single rooted pointer via
 * JS_PUSH_TEMP_ROOT_<KIND>(cx, NULL, &tvr). Then &tvr.u.<kind> gives the
 * necessary pointer.
 *
 * JSTempValueRooter.count defines the type of the rooted value referenced by
 * JSTempValueRooter.u union of type JSTempValueUnion. When count is positive
 * or zero, u.array points to a vector of jsvals. Otherwise it must be one of
 * the following constants:
 */
#define JSTVU_SINGLE        (-1)    /* u.value or u.<gcthing> is single jsval
                                       or non-JSString GC-thing pointer */
#define JSTVU_TRACE         (-2)    /* u.trace is a hook to trace a custom
                                     * structure */
#define JSTVU_SPROP         (-3)    /* u.sprop roots property tree node */
#define JSTVU_WEAK_ROOTS    (-4)    /* u.weakRoots points to saved weak roots */
#define JSTVU_COMPILER      (-5)    /* u.compiler roots JSCompiler* */
#define JSTVU_SCRIPT        (-6)    /* u.script roots JSScript* */
#define JSTVU_ENUMERATOR    (-7)    /* a pointer to JSTempValueRooter points
                                       to an instance of JSAutoEnumStateRooter
                                       with u.object storing the enumeration
                                       object */
/*
 * Here single JSTVU_SINGLE covers both jsval and pointers to almost (see note
 * below) any GC-thing via reinterpreting the thing as JSVAL_OBJECT. This works
 * because the GC-thing is aligned on a 0 mod 8 boundary, and object has the 0
 * jsval tag. So any GC-heap-allocated thing pointer may be tagged as if it
 * were an object and untagged, if it's then used only as an opaque pointer
 * until discriminated by other means than tag bits. This is how, for example,
 * js_GetGCThingTraceKind uses its |thing| parameter -- it consults GC-thing
 * flags stored separately from the thing to decide the kind of thing.
 *
 * Note well that JSStrings may be statically allocated (see the intStringTable
 * and unitStringTable static arrays), so this hack does not work for arbitrary
 * GC-thing pointers.
 */
#define JS_PUSH_TEMP_ROOT_COMMON(cx,x,tvr,cnt,kind)                           \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((cx)->tempValueRooters != (tvr));                           \
        (tvr)->count = (cnt);                                                 \
        (tvr)->u.kind = (x);                                                  \
        (tvr)->down = (cx)->tempValueRooters;                                 \
        (cx)->tempValueRooters = (tvr);                                       \
    JS_END_MACRO
#define JS_POP_TEMP_ROOT(cx,tvr)                                              \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((cx)->tempValueRooters == (tvr));                           \
        (cx)->tempValueRooters = (tvr)->down;                                 \
    JS_END_MACRO
#define JS_PUSH_TEMP_ROOT(cx,cnt,arr,tvr)                                     \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((int)(cnt) >= 0);                                           \
        JS_PUSH_TEMP_ROOT_COMMON(cx, arr, tvr, (ptrdiff_t) (cnt), array);     \
    JS_END_MACRO
#define JS_PUSH_SINGLE_TEMP_ROOT(cx,val,tvr)                                  \
    JS_PUSH_TEMP_ROOT_COMMON(cx, val, tvr, JSTVU_SINGLE, value)
#define JS_PUSH_TEMP_ROOT_OBJECT(cx,obj,tvr)                                  \
    JS_PUSH_TEMP_ROOT_COMMON(cx, obj, tvr, JSTVU_SINGLE, object)
#define JS_PUSH_TEMP_ROOT_STRING(cx,str,tvr)                                  \
    JS_PUSH_SINGLE_TEMP_ROOT(cx, str ? STRING_TO_JSVAL(str) : JSVAL_NULL, tvr)
#define JS_PUSH_TEMP_ROOT_XML(cx,xml_,tvr)                                    \
    JS_PUSH_TEMP_ROOT_COMMON(cx, xml_, tvr, JSTVU_SINGLE, xml)
#define JS_PUSH_TEMP_ROOT_TRACE(cx,trace_,tvr)                                \
    JS_PUSH_TEMP_ROOT_COMMON(cx, trace_, tvr, JSTVU_TRACE, trace)
#define JS_PUSH_TEMP_ROOT_SPROP(cx,sprop_,tvr)                                \
    JS_PUSH_TEMP_ROOT_COMMON(cx, sprop_, tvr, JSTVU_SPROP, sprop)
#define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr)                        \
    JS_PUSH_TEMP_ROOT_COMMON(cx, weakRoots_, tvr, JSTVU_WEAK_ROOTS, weakRoots)
#define JS_PUSH_TEMP_ROOT_COMPILER(cx,pc,tvr)                                 \
    JS_PUSH_TEMP_ROOT_COMMON(cx, pc, tvr, JSTVU_COMPILER, compiler)
#define JS_PUSH_TEMP_ROOT_SCRIPT(cx,script_,tvr)                              \
    JS_PUSH_TEMP_ROOT_COMMON(cx, script_, tvr, JSTVU_SCRIPT, script)
#define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */ #define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */
extern const JSDebugHooks js_NullDebugHooks; /* defined in jsdbgapi.cpp */ extern const JSDebugHooks js_NullDebugHooks; /* defined in jsdbgapi.cpp */
/*                                                                               namespace js {
 * Wraps a stack frame which has been temporarily popped from its call stack    class AutoGCRooter;
 * and needs to be GC-reachable. See JSContext::{push,pop}GCReachableFrame.     }
 */
struct JSGCReachableFrame {
    JSGCReachableFrame *next;
    JSStackFrame *frame;
};
struct JSContext struct JSContext
{ {
explicit JSContext(JSRuntime *rt) : runtime(rt), busyArrays(this) {} explicit JSContext(JSRuntime *rt) : runtime(rt), busyArrays(this) {}
/* /*
* If this flag is set, we were asked to call back the operation callba ck * If this flag is set, we were asked to call back the operation callba ck
* as soon as possible. * as soon as possible.
*/ */
volatile jsint operationCallbackFlag; volatile jsint operationCallbackFlag;
skipping to change at line 1379 skipping to change at line 1291
/* Branch callback. */ /* Branch callback. */
JSOperationCallback operationCallback; JSOperationCallback operationCallback;
/* Interpreter activation count. */ /* Interpreter activation count. */
uintN interpLevel; uintN interpLevel;
/* Client opaque pointers. */ /* Client opaque pointers. */
void *data; void *data;
void *data2; void *data2;
/* Linked list of frames temporarily popped from their chain. */
JSGCReachableFrame *reachableFrames;
void pushGCReachableFrame(JSGCReachableFrame &gcrf, JSStackFrame *f) {
gcrf.next = reachableFrames;
gcrf.frame = f;
reachableFrames = &gcrf;
}
void popGCReachableFrame() {
reachableFrames = reachableFrames->next;
}
private: private:
#ifdef __GNUC__ #ifdef __GNUC__
# pragma GCC visibility push(default) # pragma GCC visibility push(default)
#endif #endif
friend void js_TraceContext(JSTracer *, JSContext *); friend void js_TraceContext(JSTracer *, JSContext *);
#ifdef __GNUC__ #ifdef __GNUC__
# pragma GCC visibility pop # pragma GCC visibility pop
#endif #endif
/* Linked list of callstacks. See CallStack. */ /* Linked list of callstacks. See CallStack. */
skipping to change at line 1446 skipping to change at line 1345
fp = NULL; fp = NULL;
} }
/* Undoes calls to suspendTopCallStack. */ /* Undoes calls to suspendTopCallStack. */
void restoreCallStack() { void restoreCallStack() {
JS_ASSERT(!fp && currentCallStack && currentCallStack->isSuspended( )); JS_ASSERT(!fp && currentCallStack && currentCallStack->isSuspended( ));
fp = currentCallStack->getSuspendedFrame(); fp = currentCallStack->getSuspendedFrame();
currentCallStack->restore(); currentCallStack->restore();
} }
/*
     * Perform a linear search of all frames in all callstacks in the given context
     * for the given frame, returning the callstack, if found, and null otherwise.
*/
js::CallStack *containingCallStack(JSStackFrame *target);
#ifdef JS_THREADSAFE #ifdef JS_THREADSAFE
JSThread *thread; JSThread *thread;
jsrefcount requestDepth; jsrefcount requestDepth;
/* Same as requestDepth but ignoring JS_SuspendRequest/JS_ResumeRequest */ /* Same as requestDepth but ignoring JS_SuspendRequest/JS_ResumeRequest */
jsrefcount outstandingRequests; jsrefcount outstandingRequests;
JSTitle *lockedSealedTitle; /* weak ref, for low-cost seale d JSTitle *lockedSealedTitle; /* weak ref, for low-cost seale d
title locking */ title locking */
JSCList threadLinks; /* JSThread contextList linkage */ JSCList threadLinks; /* JSThread contextList linkage */
#define CX_FROM_THREAD_LINKS(tl) \ #define CX_FROM_THREAD_LINKS(tl) \
((JSContext *)((char *)(tl) - offsetof(JSContext, threadLinks))) ((JSContext *)((char *)(tl) - offsetof(JSContext, threadLinks)))
#endif #endif
/* PDL of stack headers describing stack slots not rooted by argv, etc. */ /* PDL of stack headers describing stack slots not rooted by argv, etc. */
JSStackHeader *stackHeaders; JSStackHeader *stackHeaders;
/* Stack of thread-stack-allocated temporary GC roots. */ /* Stack of thread-stack-allocated GC roots. */
JSTempValueRooter *tempValueRooters; js::AutoGCRooter *autoGCRooters;
/* Debug hooks associated with the current context. */ /* Debug hooks associated with the current context. */
const JSDebugHooks *debugHooks; const JSDebugHooks *debugHooks;
/* Security callbacks that override any defined on the runtime. */ /* Security callbacks that override any defined on the runtime. */
JSSecurityCallbacks *securityCallbacks; JSSecurityCallbacks *securityCallbacks;
/* Pinned regexp pool used for regular expressions. */ /* Pinned regexp pool used for regular expressions. */
JSArenaPool regexpPool; JSArenaPool regexpPool;
/* Stored here to avoid passing it around as a parameter. */ /* Stored here to avoid passing it around as a parameter. */
uintN resolveFlags; uintN resolveFlags;
/* Random number generator state, used by jsmath.cpp. */
int64 rngSeed;
#ifdef JS_TRACER #ifdef JS_TRACER
/* /*
* State for the current tree execution. bailExit is valid if the tree has * State for the current tree execution. bailExit is valid if the tree has
* called back into native code via a _FAIL builtin and has not yet bai led, * called back into native code via a _FAIL builtin and has not yet bai led,
* else garbage (NULL in debug builds). * else garbage (NULL in debug builds).
*/ */
js::InterpState *interpState; js::InterpState *interpState;
js::VMSideExit *bailExit; js::VMSideExit *bailExit;
/* /*
skipping to change at line 1700 skipping to change at line 1608
#ifdef __cplusplus #ifdef __cplusplus
static inline JSAtom ** static inline JSAtom **
FrameAtomBase(JSContext *cx, JSStackFrame *fp) FrameAtomBase(JSContext *cx, JSStackFrame *fp)
{ {
return fp->imacpc return fp->imacpc
? COMMON_ATOMS_START(&cx->runtime->atomState) ? COMMON_ATOMS_START(&cx->runtime->atomState)
: fp->script->atomMap.vector; : fp->script->atomMap.vector;
} }
namespace js {
class AutoGCRooter {
public:
AutoGCRooter(JSContext *cx, ptrdiff_t tag)
: down(cx->autoGCRooters), tag(tag), context(cx)
{
JS_ASSERT(this != cx->autoGCRooters);
cx->autoGCRooters = this;
}
~AutoGCRooter() {
JS_ASSERT(this == context->autoGCRooters);
context->autoGCRooters = down;
}
inline void trace(JSTracer *trc);
#ifdef __GNUC__
# pragma GCC visibility push(default)
#endif
friend void ::js_TraceContext(JSTracer *trc, JSContext *acx);
#ifdef __GNUC__
# pragma GCC visibility pop
#endif
protected:
AutoGCRooter * const down;
/*
     * Discriminates actual subclass of this being used. If non-negative, the
     * subclass roots an array of jsvals of the length stored in this field.
     * If negative, meaning is indicated by the corresponding value in the enum
     * below. Any other negative value indicates some deeper problem such as
     * memory corruption.
*/
ptrdiff_t tag;
JSContext * const context;
enum {
JSVAL = -1, /* js::AutoValueRooter */
SPROP = -2, /* js::AutoScopePropertyRooter */
WEAKROOTS = -3, /* js::AutoSaveWeakRoots */
COMPILER = -4, /* JSCompiler */
SCRIPT = -5, /* js::AutoScriptRooter */
ENUMERATOR = -6, /* js::AutoEnumStateRooter */
IDARRAY = -7, /* js::AutoIdArray */
DESCRIPTORS = -8, /* js::AutoDescriptorArray */
NAMESPACES = -9, /* js::AutoNamespaceArray */
XML = -10, /* js::AutoXMLRooter */
OBJECT = -11, /* js::AutoObjectRooter */
ID = -12, /* js::AutoIdRooter */
VECTOR = -13 /* js::AutoValueVector */
};
};
class AutoSaveWeakRoots : private AutoGCRooter
{
public:
explicit AutoSaveWeakRoots(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, WEAKROOTS), savedRoots(cx->weakRoots)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
JSWeakRoots savedRoots;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
/* FIXME(bug 332648): Move this into a public header. */ /* FIXME(bug 332648): Move this into a public header. */
class JSAutoTempValueRooter class AutoValueRooter : private AutoGCRooter
{ {
public: public:
JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec explicit AutoValueRooter(JSContext *cx, jsval v = JSVAL_NULL
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx) { : AutoGCRooter(cx, JSVAL), val(v)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
} }
explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL AutoValueRooter(JSContext *cx, JSString *str
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx) { : AutoGCRooter(cx, JSVAL), val(STRING_TO_JSVAL(str))
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
} }
JSAutoTempValueRooter(JSContext *cx, JSString *str AutoValueRooter(JSContext *cx, JSObject *obj
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx) { : AutoGCRooter(cx, JSVAL), val(OBJECT_TO_JSVAL(obj))
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr);
} }
JSAutoTempValueRooter(JSContext *cx, JSObject *obj
JS_GUARD_OBJECT_NOTIFIER_PARAM) void setObject(JSObject *obj) {
: mContext(cx) { JS_ASSERT(tag == JSVAL);
val = OBJECT_TO_JSVAL(obj);
}
void setString(JSString *str) {
JS_ASSERT(tag == JSVAL);
JS_ASSERT(str);
val = STRING_TO_JSVAL(str);
}
void setDouble(jsdouble *dp) {
JS_ASSERT(tag == JSVAL);
JS_ASSERT(dp);
val = DOUBLE_TO_JSVAL(dp);
}
jsval value() const {
JS_ASSERT(tag == JSVAL);
return val;
}
jsval *addr() {
JS_ASSERT(tag == JSVAL);
return &val;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
jsval val;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
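For orientation, a minimal usage sketch of the new rooter (the callee name below is hypothetical): the value stays rooted for the lifetime of the stack object, replacing the old JS_PUSH_SINGLE_TEMP_ROOT / JS_POP_TEMP_ROOT pairing.

JSBool
ExampleRootString(JSContext *cx, JSString *str)
{
    js::AutoValueRooter tvr(cx, str);    /* str is rooted until tvr goes out of scope */
    if (!DoSomethingThatCanGC(cx))       /* hypothetical callee that may trigger a GC */
        return JS_FALSE;
    return JS_TRUE;
}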
class AutoObjectRooter : private AutoGCRooter {
public:
AutoObjectRooter(JSContext *cx, JSObject *obj = NULL
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, OBJECT), obj(obj)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_TEMP_ROOT_OBJECT(mContext, obj, &mTvr);
} }
JSAutoTempValueRooter(JSContext *cx, JSScopeProperty *sprop
JS_GUARD_OBJECT_NOTIFIER_PARAM) void setObject(JSObject *obj) {
: mContext(cx) { this->obj = obj;
}
JSObject * object() const {
return obj;
}
JSObject ** addr() {
return &obj;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
JSObject *obj;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
class AutoArrayRooter : private AutoGCRooter {
public:
AutoArrayRooter(JSContext *cx, size_t len, jsval *vec
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, len), array(vec)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_TEMP_ROOT_SPROP(mContext, sprop, &mTvr); JS_ASSERT(tag >= 0);
} }
~JSAutoTempValueRooter() { void changeLength(size_t newLength) {
JS_POP_TEMP_ROOT(mContext, &mTvr); tag = ptrdiff_t(newLength);
JS_ASSERT(tag >= 0);
} }
jsval value() { return mTvr.u.value; } void changeArray(jsval *newArray, size_t newLength) {
jsval *addr() { return &mTvr.u.value; } changeLength(newLength);
array = newArray;
}
protected: jsval *array;
JSContext *mContext;
friend void AutoGCRooter::trace(JSTracer *trc);
private: private:
JSTempValueRooter mTvr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
}; };
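A short sketch of rooting a small local jsval array with AutoArrayRooter; the wrapping function is hypothetical and JS_ARRAY_LENGTH is assumed to be the usual sizeof-based helper from jsutil.h.

static JSBool
CallWithRootedPair(JSContext *cx)        /* hypothetical helper */
{
    jsval tmp[2] = { JSVAL_NULL, JSVAL_NULL };
    js::AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(tmp), tmp);
    /* tmp[0] and tmp[1] are traced by the GC until tvr goes out of scope. */
    return JS_TRUE;
}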
class JSAutoTempIdRooter class AutoScopePropertyRooter : private AutoGCRooter {
public:
AutoScopePropertyRooter(JSContext *cx, JSScopeProperty *sprop
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, SPROP), sprop(sprop)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
JSScopeProperty * const sprop;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
class AutoScriptRooter : private AutoGCRooter {
public:
AutoScriptRooter(JSContext *cx, JSScript *script
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, SCRIPT), script(script)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
void setScript(JSScript *script) {
this->script = script;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
JSScript *script;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
class AutoIdRooter : private AutoGCRooter
{ {
public: public:
explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0) explicit AutoIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0)
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx) { : AutoGCRooter(cx, ID), idval(id)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);
} }
~JSAutoTempIdRooter() { jsid id() {
JS_POP_TEMP_ROOT(mContext, &mTvr); return idval;
} }
jsid id() { return (jsid) mTvr.u.value; } jsid * addr() {
jsid * addr() { return (jsid *) &mTvr.u.value; } return &idval;
}
friend void AutoGCRooter::trace(JSTracer *trc);
private: private:
JSContext *mContext; jsid idval;
JSTempValueRooter mTvr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
}; };
class JSAutoIdArray { class AutoIdArray : private AutoGCRooter {
public: public:
JSAutoIdArray(JSContext *cx, JSIdArray *ida AutoIdArray(JSContext *cx, JSIdArray *ida
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: cx(cx), idArray(ida) { : AutoGCRooter(cx, ida ? ida->length : 0), idArray(ida)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
if (ida)
JS_PUSH_TEMP_ROOT(cx, ida->length, ida->vector, &tvr);
} }
~JSAutoIdArray() { ~AutoIdArray() {
if (idArray) { if (idArray)
JS_POP_TEMP_ROOT(cx, &tvr); JS_DestroyIdArray(context, idArray);
JS_DestroyIdArray(cx, idArray);
}
} }
bool operator!() { bool operator!() {
return idArray == NULL; return idArray == NULL;
} }
jsid operator[](size_t i) const { jsid operator[](size_t i) const {
JS_ASSERT(idArray); JS_ASSERT(idArray);
JS_ASSERT(i < size_t(idArray->length)); JS_ASSERT(i < size_t(idArray->length));
return idArray->vector[i]; return idArray->vector[i];
} }
size_t length() const { size_t length() const {
return idArray->length; return idArray->length;
} }
friend void AutoGCRooter::trace(JSTracer *trc);
protected:
inline void trace(JSTracer *trc);
private: private:
JSContext * const cx;
JSIdArray * const idArray; JSIdArray * const idArray;
JSTempValueRooter tvr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
/* No copy or assignment semantics. */
AutoIdArray(AutoIdArray &ida);
void operator=(AutoIdArray &ida);
}; };
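Usage sketch: AutoIdArray takes ownership of a JSIdArray, for example one returned by JS_Enumerate, rooting it while in scope and destroying it on exit. The wrapping function is hypothetical.

static JSBool
ListOwnIds(JSContext *cx, JSObject *obj)
{
    js::AutoIdArray ida(cx, JS_Enumerate(cx, obj));
    if (!ida)
        return JS_FALSE;                 /* JS_Enumerate failed */
    for (size_t i = 0, n = ida.length(); i < n; i++) {
        jsid id = ida[i];
        /* ... use id ... */
    }
    return JS_TRUE;
}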
/* The auto-root for enumeration object and its state. */ /* The auto-root for enumeration object and its state. */
class JSAutoEnumStateRooter : public JSTempValueRooter class AutoEnumStateRooter : private AutoGCRooter
{ {
public: public:
JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep AutoEnumStateRooter(JSContext *cx, JSObject *obj
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx), mStatep(statep) : AutoGCRooter(cx, ENUMERATOR), obj(obj), stateValue(JSVAL_NULL)
{ {
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
JS_ASSERT(obj); JS_ASSERT(obj);
JS_ASSERT(statep);
JS_PUSH_TEMP_ROOT_COMMON(cx, obj, this, JSTVU_ENUMERATOR, object);
} }
~JSAutoEnumStateRooter() { ~AutoEnumStateRooter() {
JS_POP_TEMP_ROOT(mContext, this); if (!JSVAL_IS_NULL(stateValue)) {
#ifdef DEBUG
JSBool ok =
#endif
obj->enumerate(context, JSENUMERATE_DESTROY, &stateValue, 0);
JS_ASSERT(ok);
}
} }
void mark(JSTracer *trc) { friend void AutoGCRooter::trace(JSTracer *trc);
JS_CALL_OBJECT_TRACER(trc, u.object, "enumerator_obj");
js_MarkEnumeratorState(trc, u.object, *mStatep); jsval state() const { return stateValue; }
jsval * addr() { return &stateValue; }
protected:
void trace(JSTracer *trc) {
JS_CALL_OBJECT_TRACER(trc, obj, "js::AutoEnumStateRooter.obj");
js_MarkEnumeratorState(trc, obj, stateValue);
} }
JSObject * const obj;
private: private:
JSContext *mContext; jsval stateValue;
jsval *mStatep;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
}; };
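A hedged sketch of driving the new-enumerate protocol with this rooter; the signature of JSObject::enumerate is assumed from the destructor above, and the surrounding function is hypothetical.

JSBool
VisitIds(JSContext *cx, JSObject *obj)
{
    js::AutoEnumStateRooter tvr(cx, obj);
    if (!obj->enumerate(cx, JSENUMERATE_INIT, tvr.addr(), NULL))
        return JS_FALSE;
    for (;;) {
        jsid id;
        if (!obj->enumerate(cx, JSENUMERATE_NEXT, tvr.addr(), &id))
            return JS_FALSE;
        if (JSVAL_IS_NULL(tvr.state()))
            break;                       /* enumeration exhausted */
        /* ... use id ... */
    }
    return JS_TRUE;                      /* destructor sends JSENUMERATE_DESTROY if state is live */
}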
#ifdef JS_HAS_XML_SUPPORT
class AutoXMLRooter : private AutoGCRooter {
public:
AutoXMLRooter(JSContext *cx, JSXML *xml)
: AutoGCRooter(cx, XML), xml(xml)
{
JS_ASSERT(xml);
}
friend void AutoGCRooter::trace(JSTracer *trc);
private:
JSXML * const xml;
};
#endif /* JS_HAS_XML_SUPPORT */
} /* namespace js */
class JSAutoResolveFlags class JSAutoResolveFlags
{ {
public: public:
JSAutoResolveFlags(JSContext *cx, uintN flags JSAutoResolveFlags(JSContext *cx, uintN flags
JS_GUARD_OBJECT_NOTIFIER_PARAM) JS_GUARD_OBJECT_NOTIFIER_PARAM)
: mContext(cx), mSaved(cx->resolveFlags) { : mContext(cx), mSaved(cx->resolveFlags)
{
JS_GUARD_OBJECT_NOTIFIER_INIT; JS_GUARD_OBJECT_NOTIFIER_INIT;
cx->resolveFlags = flags; cx->resolveFlags = flags;
} }
~JSAutoResolveFlags() { mContext->resolveFlags = mSaved; } ~JSAutoResolveFlags() { mContext->resolveFlags = mSaved; }
private: private:
JSContext *mContext; JSContext *mContext;
uintN mSaved; uintN mSaved;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER JS_DECL_USE_GUARD_OBJECT_NOTIFIER
skipping to change at line 2132 skipping to change at line 2250
((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \ ((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \
spindex, v, fallback, arg1, NULL)) spindex, v, fallback, arg1, NULL))
#define js_ReportValueError3(cx,errorNumber,spindex,v,fallback,arg1,arg2) \ #define js_ReportValueError3(cx,errorNumber,spindex,v,fallback,arg1,arg2) \
((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \ ((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \
spindex, v, fallback, arg1, arg2)) spindex, v, fallback, arg1, arg2))
extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit]; extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
/* /*
 * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack grows    * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack
 * in the expected direction. On Unix-y systems, JS_STACK_GROWTH_DIRECTION is    * grows in the expected direction.
 * computed on the build host by jscpucfg.c and written into jsautocfg.h. The
 * macro is hardcoded in jscpucfg.h on Windows and Mac systems (for historical
 * reasons pre-dating autoconf usage).
*/ */
#if JS_STACK_GROWTH_DIRECTION > 0 #if JS_STACK_GROWTH_DIRECTION > 0
# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit) # define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit)
#else #else
# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) > (cx)->stackLimit) # define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) > (cx)->stackLimit)
#endif #endif
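A sketch of the usual guard against runaway native recursion built on this macro; the wrapping function is hypothetical and js_ReportOverRecursed is assumed to be the standard reporter declared elsewhere in this header.

static JSBool
CheckNativeStack(JSContext *cx)
{
    int dummy;                           /* address of a local approximates the current stack depth */
    if (!JS_CHECK_STACK_SIZE(cx, dummy)) {
        js_ReportOverRecursed(cx);       /* assumed reporter; adjust to the engine's actual API */
        return JS_FALSE;
    }
    return JS_TRUE;
}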
/* /*
* If the operation callback flag was set, call the operation callback. * If the operation callback flag was set, call the operation callback.
* This macro can run the full GC. Return true if it is OK to continue and * This macro can run the full GC. Return true if it is OK to continue and
skipping to change at line 2235 skipping to change at line 2350
static JS_FORCES_STACK JS_INLINE JSStackFrame * static JS_FORCES_STACK JS_INLINE JSStackFrame *
js_GetTopStackFrame(JSContext *cx) js_GetTopStackFrame(JSContext *cx)
{ {
js::LeaveTrace(cx); js::LeaveTrace(cx);
return cx->fp; return cx->fp;
} }
static JS_INLINE JSBool static JS_INLINE JSBool
js_IsPropertyCacheDisabled(JSContext *cx) js_IsPropertyCacheDisabled(JSContext *cx)
{ {
return cx->runtime->shapeGen >= SHAPE_OVERFLOW_BIT; return cx->runtime->shapeGen >= js::SHAPE_OVERFLOW_BIT;
} }
static JS_INLINE uint32 static JS_INLINE uint32
js_RegenerateShapeForGC(JSContext *cx) js_RegenerateShapeForGC(JSContext *cx)
{ {
JS_ASSERT(cx->runtime->gcRunning); JS_ASSERT(cx->runtime->gcRunning);
JS_ASSERT(cx->runtime->gcRegenShapes); JS_ASSERT(cx->runtime->gcRegenShapes);
/* /*
* Under the GC, compared with js_GenerateShape, we don't need to use * Under the GC, compared with js_GenerateShape, we don't need to use
* atomic increments but we still must make sure that after an overflow * atomic increments but we still must make sure that after an overflow
* the shape stays such. * the shape stays such.
*/ */
uint32 shape = cx->runtime->shapeGen; uint32 shape = cx->runtime->shapeGen;
shape = (shape + 1) | (shape & SHAPE_OVERFLOW_BIT); shape = (shape + 1) | (shape & js::SHAPE_OVERFLOW_BIT);
cx->runtime->shapeGen = shape; cx->runtime->shapeGen = shape;
return shape; return shape;
} }
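To make the sticky-overflow property concrete, a tiny illustrative check (not part of the header; the function name is hypothetical): once the overflow bit is set in shapeGen, the regeneration step above can never clear it.

static JS_INLINE void
IllustrateStickyOverflow()
{
    uint32 s = js::SHAPE_OVERFLOW_BIT | 5;    /* an already-overflowed generation */
    s = (s + 1) | (s & js::SHAPE_OVERFLOW_BIT);
    JS_ASSERT(s & js::SHAPE_OVERFLOW_BIT);    /* the bit survives the update */
}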
namespace js { namespace js {
inline void * inline void *
ContextAllocPolicy::malloc(size_t bytes) ContextAllocPolicy::malloc(size_t bytes)
{ {
return cx->malloc(bytes); return cx->malloc(bytes);
skipping to change at line 2281 skipping to change at line 2396
{ {
return cx->realloc(p, bytes); return cx->realloc(p, bytes);
} }
inline void inline void
ContextAllocPolicy::reportAllocOverflow() const ContextAllocPolicy::reportAllocOverflow() const
{ {
js_ReportAllocationOverflow(cx); js_ReportAllocationOverflow(cx);
} }
class AutoValueVector : private AutoGCRooter
{
public:
explicit AutoValueVector(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, VECTOR), vector(cx)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
size_t length() const { return vector.length(); }
bool push(jsval v) { return vector.append(v); }
bool push(JSString *str) { return push(STRING_TO_JSVAL(str)); }
bool push(JSObject *obj) { return push(OBJECT_TO_JSVAL(obj)); }
bool push(jsdouble *dp) { return push(DOUBLE_TO_JSVAL(dp)); }
void pop() { vector.popBack(); }
bool resize(size_t newLength) {
size_t oldLength = vector.length();
if (!vector.resize(newLength))
return false;
JS_STATIC_ASSERT(JSVAL_NULL == 0);
if (newLength > oldLength)
PodZero(vector.begin(), newLength - oldLength);
return true;
}
bool reserve(size_t newLength) {
return vector.reserve(newLength);
}
jsval & operator[](size_t i) { return vector[i]; }
jsval operator[](size_t i) const { return vector[i]; }
const jsval * buffer() const { return vector.begin(); }
jsval * buffer() { return vector.begin(); }
friend void AutoGCRooter::trace(JSTracer *trc);
private:
Vector<jsval, 8> vector;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
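A minimal usage sketch of AutoValueVector: the vector roots every element it holds, and push can fail on OOM, so callers are expected to check it. The wrapping function is hypothetical.

static JSBool
BuildArgv(JSContext *cx, JSObject *obj, JSString *str)
{
    js::AutoValueVector vals(cx);
    if (!vals.push(obj) || !vals.push(str))
        return JS_FALSE;                 /* append failed (OOM) */
    /* vals.buffer() now points at vals.length() rooted jsvals, usable as an argv. */
    return JS_TRUE;
}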
} }
#ifdef _MSC_VER
#pragma warning(pop)
#pragma warning(pop)
#endif
#endif /* jscntxt_h___ */ #endif /* jscntxt_h___ */
 End of changes. 70 change blocks. 
264 lines changed or deleted 388 lines changed or added


 jsemit.h   jsemit.h 
skipping to change at line 204 skipping to change at line 204
JSFunctionBox *functionList; JSFunctionBox *functionList;
#ifdef JS_SCOPE_DEPTH_METER #ifdef JS_SCOPE_DEPTH_METER
uint16 scopeDepth; /* current lexical scope chain depth */ uint16 scopeDepth; /* current lexical scope chain depth */
uint16 maxScopeDepth; /* maximum lexical scope chain depth */ uint16 maxScopeDepth; /* maximum lexical scope chain depth */
#endif #endif
JSTreeContext(JSCompiler *jsc) JSTreeContext(JSCompiler *jsc)
: flags(0), ngvars(0), bodyid(0), blockidGen(0), : flags(0), ngvars(0), bodyid(0), blockidGen(0),
topStmt(NULL), topScopeStmt(NULL), blockChain(NULL), blockNode(NULL ), topStmt(NULL), topScopeStmt(NULL), blockChain(NULL), blockNode(NULL ),
compiler(jsc), scopeChain(NULL), parent(NULL), staticLevel(0), compiler(jsc), scopeChain(NULL), parent(jsc->tc), staticLevel(0),
funbox(NULL), functionList(NULL), sharpSlotBase(-1) funbox(NULL), functionList(NULL), sharpSlotBase(-1)
{ {
jsc->tc = this;
JS_SCOPE_DEPTH_METERING(scopeDepth = maxScopeDepth = 0); JS_SCOPE_DEPTH_METERING(scopeDepth = maxScopeDepth = 0);
} }
/* /*
* For functions the tree context is constructed and destructed a second * For functions the tree context is constructed and destructed a second
* time during code generation. To avoid a redundant stats update in such * time during code generation. To avoid a redundant stats update in such
* cases, we store uint16(-1) in maxScopeDepth. * cases, we store uint16(-1) in maxScopeDepth.
*/ */
~JSTreeContext() { ~JSTreeContext() {
compiler->tc = this->parent;
JS_SCOPE_DEPTH_METERING_IF((maxScopeDepth != uint16(-1)), JS_SCOPE_DEPTH_METERING_IF((maxScopeDepth != uint16(-1)),
JS_BASIC_STATS_ACCUM(&compiler JS_BASIC_STATS_ACCUM(&compiler
->context ->context
->runtime ->runtime
->lexicalScopeDepthStats, ->lexicalScopeDepthStats,
maxScopeDepth)); maxScopeDepth));
} }
uintN blockid() { return topStmt ? topStmt->blockid : bodyid; } uintN blockid() { return topStmt ? topStmt->blockid : bodyid; }
 End of changes. 3 change blocks. 
1 lines changed or deleted 3 lines changed or added


 jsfun.h   jsfun.h 
skipping to change at line 217 skipping to change at line 217
#ifdef JS_TRACER #ifdef JS_TRACER
/* MSVC demands the intermediate (void *) cast here. */ /* MSVC demands the intermediate (void *) cast here. */
# define JS_TN(name,fastcall,nargs,flags,trcinfo) \ # define JS_TN(name,fastcall,nargs,flags,trcinfo) \
JS_FN(name, JS_DATA_TO_FUNC_PTR(JSNative, trcinfo), nargs, \ JS_FN(name, JS_DATA_TO_FUNC_PTR(JSNative, trcinfo), nargs, \
(flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS | JSFUN_TRCINFO) (flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS | JSFUN_TRCINFO)
#else #else
# define JS_TN(name,fastcall,nargs,flags,trcinfo) \ # define JS_TN(name,fastcall,nargs,flags,trcinfo) \
JS_FN(name, fastcall, nargs, flags) JS_FN(name, fastcall, nargs, flags)
#endif #endif
/*
 * NB: the Arguments class is an uninitialized internal class that masquerades
 * (according to Object.prototype.toString.call(argsobj)) as "Object".
 *
 * WARNING (to alert embedders reading this private .h file): arguments objects
 * are *not* thread-safe and should not be used concurrently -- they should be
 * used by only one thread at a time, preferably by only one thread over their
 * lifetime (a JS worker that migrates from one OS thread to another but shares
 * nothing is ok).
 *
 * Yes, this is an incompatible change, which prefigures the impending move to
 * single-threaded objects and GC heaps.
*/
extern JSClass js_ArgumentsClass; extern JSClass js_ArgumentsClass;
inline bool
JSObject::isArguments() const
{
return getClass() == &js_ArgumentsClass;
}
extern JS_FRIEND_DATA(JSClass) js_CallClass; extern JS_FRIEND_DATA(JSClass) js_CallClass;
extern JSClass js_DeclEnvClass; extern JSClass js_DeclEnvClass;
extern const uint32 CALL_CLASS_FIXED_RESERVED_SLOTS; extern const uint32 CALL_CLASS_FIXED_RESERVED_SLOTS;
/* JS_FRIEND_DATA so that VALUE_IS_FUNCTION is callable from the shell. */ /* JS_FRIEND_DATA so that VALUE_IS_FUNCTION is callable from the shell. */
extern JS_FRIEND_DATA(JSClass) js_FunctionClass; extern JS_FRIEND_DATA(JSClass) js_FunctionClass;
inline bool inline bool
JSObject::isFunction() const JSObject::isFunction() const
{ {
return getClass() == &js_FunctionClass; return getClass() == &js_FunctionClass;
} }
#define HAS_FUNCTION_CLASS(obj) (obj)->isFunction()
/* /*
* NB: jsapi.h and jsobj.h must be included before any call to this macro. * NB: jsapi.h and jsobj.h must be included before any call to this macro.
*/ */
#define VALUE_IS_FUNCTION(cx, v) \ #define VALUE_IS_FUNCTION(cx, v) \
(!JSVAL_IS_PRIMITIVE(v) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))) (!JSVAL_IS_PRIMITIVE(v) && JSVAL_TO_OBJECT(v)->isFunction())
/* /*
 * Macro to access the private slot of the function object after the slot is * Macro to access the private slot of the function object after the slot is
* initialized. * initialized.
*/ */
#define GET_FUNCTION_PRIVATE(cx, funobj) \ #define GET_FUNCTION_PRIVATE(cx, funobj) \
(JS_ASSERT(HAS_FUNCTION_CLASS(funobj)), \ (JS_ASSERT((funobj)->isFunction()), \
(JSFunction *) (funobj)->getPrivate()) (JSFunction *) (funobj)->getPrivate())
namespace js {
/* /*
* Return true if this is a compiler-created internal function accessed by * Return true if this is a compiler-created internal function accessed by
* its own object. Such a function object must not be accessible to script * its own object. Such a function object must not be accessible to script
* or embedding code. * or embedding code.
*/ */
inline bool inline bool
js_IsInternalFunctionObject(JSObject *funobj) IsInternalFunctionObject(JSObject *funobj)
{ {
JS_ASSERT(HAS_FUNCTION_CLASS(funobj)); JS_ASSERT(funobj->isFunction());
JSFunction *fun = (JSFunction *) funobj->getPrivate(); JSFunction *fun = (JSFunction *) funobj->getPrivate();
return funobj == fun && (fun->flags & JSFUN_LAMBDA) && !funobj->getPare nt(); return funobj == fun && (fun->flags & JSFUN_LAMBDA) && !funobj->getPare nt();
} }
namespace js { struct ArgsPrivateNative; } struct ArgsPrivateNative;
inline js::ArgsPrivateNative * inline ArgsPrivateNative *
js_GetArgsPrivateNative(JSObject *argsobj) GetArgsPrivateNative(JSObject *argsobj)
{ {
JS_ASSERT(STOBJ_GET_CLASS(argsobj) == &js_ArgumentsClass); JS_ASSERT(argsobj->isArguments());
uintptr_t p = (uintptr_t) argsobj->getPrivate(); uintptr_t p = (uintptr_t) argsobj->getPrivate();
return (js::ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL); return (ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL);
} }
} /* namespace js */
extern JSObject * extern JSObject *
js_InitFunctionClass(JSContext *cx, JSObject *obj); js_InitFunctionClass(JSContext *cx, JSObject *obj);
extern JSObject * extern JSObject *
js_InitArgumentsClass(JSContext *cx, JSObject *obj); js_InitArgumentsClass(JSContext *cx, JSObject *obj);
extern JSFunction * extern JSFunction *
js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN narg s, js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN narg s,
uintN flags, JSObject *parent, JSAtom *atom); uintN flags, JSObject *parent, JSAtom *atom);
skipping to change at line 402 skipping to change at line 424
js_IsNamedLambda(JSFunction *fun) { return (fun->flags & JSFUN_LAMBDA) && fun->atom; } js_IsNamedLambda(JSFunction *fun) { return (fun->flags & JSFUN_LAMBDA) && fun->atom; }
/* /*
* Reserved slot structure for Arguments objects: * Reserved slot structure for Arguments objects:
* *
* JSSLOT_PRIVATE - the corresponding frame until the frame exits. * JSSLOT_PRIVATE - the corresponding frame until the frame exits.
 * JSSLOT_ARGS_LENGTH - the number of actual arguments and a flag indicating * JSSLOT_ARGS_LENGTH - the number of actual arguments and a flag indicating
* whether arguments.length was overwritten. * whether arguments.length was overwritten.
* JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that was * JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that was
* overwritten. * overwritten.
 * JSSLOT_ARGS_COPY_START .. - room to store the corresponding arguments after    * JSSLOT_ARGS_START - room to store the corresponding arguments after the
 * the frame exists. The slot's value will be JSVAL_HOLE    * frame exists. The slot's value will be JSVAL_HOLE if
 * if arguments[i] was deleted or overwritten.    * arguments[i] was deleted or overwritten.
 *
 * The static assertion checks that hand-optimized code can fetch and store the
 * argument value at argsobj->dslots[i] for argument index i. But future-proof
 * your code by using {Get,Set}ArgsSlot instead of naked dslots references.
*/ */
const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1; const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1;
const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2; const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2;
const uint32 JSSLOT_ARGS_COPY_START = JSSLOT_PRIVATE + 3; const uint32 JSSLOT_ARGS_START = JSSLOT_PRIVATE + 3;
JS_STATIC_ASSERT(JSSLOT_ARGS_START == JS_INITIAL_NSLOTS);
/* Number of extra fixed slots besides JSSLOT_PRIVATE. */ /* Number of extra fixed slots besides JSSLOT_PRIVATE. */
const uint32 ARGS_CLASS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_COPY_START -    const uint32 ARGS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_START - JSSLOT_ARGS_LENGTH;
JSSLOT_ARGS_LENGTH;
/*
 * Maximum supported value of arguments.length. It bounds the maximum number of
 * arguments that can be supplied via the second (so-called |argArray|) param
 * to Function.prototype.apply. This value also bounds the number of elements
* parsed in an array initialiser.
*/
const uint32 JS_ARGS_LENGTH_MAX = JS_BIT(24) - 1;
/* /*
* JSSLOT_ARGS_LENGTH stores ((argc << 1) | overwritten_flag) as int jsval. * JSSLOT_ARGS_LENGTH stores ((argc << 1) | overwritten_flag) as int jsval.
 * Thus (JS_ARGS_LENGTH_MAX << 1) | 1 must fit JSVAL_INT_MAX. To assert that * Thus (JS_ARGS_LENGTH_MAX << 1) | 1 must fit JSVAL_INT_MAX. To assert that
* we check first that the shift does not overflow uint32. * we check first that the shift does not overflow uint32.
*/ */
JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30)); JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30));
JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX); JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX);
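To make the encoding concrete, a sketch of the inverse of js::GetArgsLength below; the helper name is hypothetical and not part of the header.

static JS_INLINE void
SetArgsLength(JSObject *argsobj, uint32 argc, bool overridden)
{
    JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX);
    argsobj->fslots[JSSLOT_ARGS_LENGTH] =
        INT_TO_JSVAL((argc << 1) | (overridden ? 1 : 0));
}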
JS_INLINE bool namespace js {
js_IsOverriddenArgsLength(JSObject *obj)
inline jsval
GetArgsSlot(JSObject *argsobj, uint32 arg)
{ {
JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); return argsobj->dslots[arg];
}
inline void
SetArgsSlot(JSObject *argsobj, uint32 arg, jsval v)
{
argsobj->dslots[arg] = v;
}
inline bool
IsOverriddenArgsLength(JSObject *obj)
{
JS_ASSERT(obj->isArguments());
jsval v = obj->fslots[JSSLOT_ARGS_LENGTH]; jsval v = obj->fslots[JSSLOT_ARGS_LENGTH];
return (JSVAL_TO_INT(v) & 1) != 0; return (JSVAL_TO_INT(v) & 1) != 0;
} }
inline uint32
GetArgsLength(JSObject *obj)
{
JS_ASSERT(obj->isArguments());
uint32 argc = uint32(JSVAL_TO_INT(obj->fslots[JSSLOT_ARGS_LENGTH])) >> 1;
JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX);
return argc;
}
} /* namespace js */
extern JSBool extern JSBool
js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp); js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp);
typedef enum JSLocalKind { typedef enum JSLocalKind {
JSLOCAL_NONE, JSLOCAL_NONE,
JSLOCAL_ARG, JSLOCAL_ARG,
JSLOCAL_VAR, JSLOCAL_VAR,
JSLOCAL_CONST, JSLOCAL_CONST,
JSLOCAL_UPVAR JSLOCAL_UPVAR
} JSLocalKind; } JSLocalKind;
 End of changes. 19 change blocks. 
24 lines changed or deleted 98 lines changed or added


 jsgc.h   jsgc.h 
skipping to change at line 51 skipping to change at line 51
#define jsgc_h___ #define jsgc_h___
/* /*
* JS Garbage Collector. * JS Garbage Collector.
*/ */
#include "jsprvtd.h" #include "jsprvtd.h"
#include "jspubtd.h" #include "jspubtd.h"
#include "jsdhash.h" #include "jsdhash.h"
#include "jsbit.h" #include "jsbit.h"
#include "jsutil.h" #include "jsutil.h"
#include "jstask.h" #include "jstask.h"
#include "jsversion.h"
JS_BEGIN_EXTERN_C JS_BEGIN_EXTERN_C
#define JSTRACE_XML 3 #define JSTRACE_XML 3
/* /*
* One past the maximum trace kind. * One past the maximum trace kind.
*/ */
#define JSTRACE_LIMIT 4 #define JSTRACE_LIMIT 4
skipping to change at line 457 skipping to change at line 458
/* /*
* This function is defined in jsdbgapi.cpp but is declared here to avoid * This function is defined in jsdbgapi.cpp but is declared here to avoid
* polluting jsdbgapi.h, a public API header, with internal functions. * polluting jsdbgapi.h, a public API header, with internal functions.
*/ */
extern void extern void
js_MarkTraps(JSTracer *trc); js_MarkTraps(JSTracer *trc);
JS_END_EXTERN_C JS_END_EXTERN_C
namespace js {
void
TraceObjectVector(JSTracer *trc, JSObject **vec, uint32 len);
inline void
TraceValues(JSTracer *trc, size_t len, jsval *vec, const char *name)
{
for (jsval *vp = vec, *end = vp + len; vp < end; vp++) {
jsval v = *vp;
if (JSVAL_IS_TRACEABLE(v)) {
JS_SET_TRACING_INDEX(trc, name, vp - vec);
js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v));
}
}
}
}
#endif /* jsgc_h___ */ #endif /* jsgc_h___ */
 End of changes. 2 change blocks. 
0 lines changed or deleted 21 lines changed or added


 jshashtable.h   jshashtable.h 
skipping to change at line 81 skipping to change at line 81
class Entry { class Entry {
HashNumber keyHash; HashNumber keyHash;
public: public:
Entry() : keyHash(0), t() {} Entry() : keyHash(0), t() {}
void operator=(const Entry &rhs) { keyHash = rhs.keyHash; assignT(t , rhs.t); } void operator=(const Entry &rhs) { keyHash = rhs.keyHash; assignT(t , rhs.t); }
NonConstT t; NonConstT t;
bool isFree() const { return keyHash == 0; } bool isFree() const { return keyHash == 0; }
void setFree() { keyHash = 0; t = T(); } void setFree() { keyHash = 0; assignT(t, T()); }
bool isRemoved() const { return keyHash == 1; } bool isRemoved() const { return keyHash == 1; }
void setRemoved() { keyHash = 1; t = T(); } void setRemoved() { keyHash = 1; assignT(t, T()); }
bool isLive() const { return keyHash > 1; } bool isLive() const { return keyHash > 1; }
void setLive(HashNumber hn) { JS_ASSERT(hn > 1); keyHash = hn; } void setLive(HashNumber hn) { JS_ASSERT(hn > 1); keyHash = hn; }
void setCollision() { JS_ASSERT(keyHash > 1); keyHash |= sCollisionBit; } void setCollision() { JS_ASSERT(keyHash > 1); keyHash |= sCollisionBit; }
void unsetCollision() { JS_ASSERT(keyHash > 1); keyHash &= ~sCollisionBit; } void unsetCollision() { JS_ASSERT(keyHash > 1); keyHash &= ~sCollisionBit; }
bool hasCollision() const { JS_ASSERT(keyHash > 1); return keyH ash & sCollisionBit; } bool hasCollision() const { JS_ASSERT(keyHash > 1); return keyH ash & sCollisionBit; }
bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) = = hn; } bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) = = hn; }
HashNumber getKeyHash() const { JS_ASSERT(!hasCollision()); return keyHash; } HashNumber getKeyHash() const { JS_ASSERT(!hasCollision()); return keyHash; }
}; };
skipping to change at line 112 skipping to change at line 112
friend class HashTable; friend class HashTable;
typedef void (Ptr::* ConvertibleToBool)(); typedef void (Ptr::* ConvertibleToBool)();
void nonNull() {} void nonNull() {}
Entry *entry; Entry *entry;
protected: protected:
Ptr(Entry &entry) : entry(&entry) {} Ptr(Entry &entry) : entry(&entry) {}
public: public:
bool found() const { return entry->isLive(); } bool found() const { return entry->isLive(); }
operator ConvertibleToBool() { return found() ? &Ptr::nonNull : 0; } operator ConvertibleToBool() const { return found() ? &Ptr::nonNull : 0; }
bool operator==(const Ptr &rhs) const { JS_ASSERT(found() && rhs.found()); return entry == rhs.entry; }
bool operator!=(const Ptr &rhs) const { return !(*this == rhs); }
bool operator!=(const Ptr &rhs) const { return !(*this == rhs); }
T &operator*() const { return entry->t; } T &operator*() const { return entry->t; }
T *operator->() const { return &entry->t; } T *operator->() const { return &entry->t; }
}; };
/* A Ptr that can be used to add a key after a failed lookup. */ /* A Ptr that can be used to add a key after a failed lookup. */
class AddPtr : public Ptr class AddPtr : public Ptr
{ {
friend class HashTable; friend class HashTable;
AddPtr(Entry &entry, HashNumber hn) : Ptr(entry), keyHash(hn) {} AddPtr(Entry &entry, HashNumber hn) : Ptr(entry), keyHash(hn) {}
HashNumber keyHash; HashNumber keyHash;
}; };
skipping to change at line 143 skipping to change at line 145
class Range class Range
{ {
protected: protected:
friend class HashTable; friend class HashTable;
Range(Entry *c, Entry *e) : cur(c), end(e) { Range(Entry *c, Entry *e) : cur(c), end(e) {
while (cur != end && !cur->isLive()) while (cur != end && !cur->isLive())
++cur; ++cur;
} }
Entry *cur, * const end; Entry *cur, *end;
public: public:
bool empty() const { bool empty() const {
return cur == end; return cur == end;
} }
const T &front() const { const T &front() const {
JS_ASSERT(!empty()); JS_ASSERT(!empty());
return cur->t; return cur->t;
} }
skipping to change at line 182 skipping to change at line 184
friend class HashTable; friend class HashTable;
HashTable &table; HashTable &table;
bool removed; bool removed;
/* Not copyable. */ /* Not copyable. */
Enum(const Enum &); Enum(const Enum &);
void operator=(const Enum &); void operator=(const Enum &);
public: public:
/* Type returned from hash table used to initialize Enum object. */ template<class Map>
struct Init { Enum(Map &map) : Range(map.all()), table(map.impl), removed(false)
Init(Range r, HashTable &t) : range(r), table(t) {} {}
Range range;
HashTable &table;
};
/* Initialize with the return value of enumerate. */
Enum(Init i) : Range(i.range), table(i.table), removed(false) {}
/* /*
* Removes the |front()| element from the table, leaving |front()| * Removes the |front()| element from the table, leaving |front()|
* invalid until the next call to |popFront()|. For example: * invalid until the next call to |popFront()|. For example:
* *
* HashSet<int> s; * HashSet<int> s;
 * for (HashSet<int>::Enum e(s.enumerate()); !e.empty(); e.popFront()) * for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
* if (e.front() == 42) * if (e.front() == 42)
* e.removeFront(); * e.removeFront();
*/ */
void removeFront() { void removeFront() {
table.remove(*this->cur); table.remove(*this->cur);
removed = true; removed = true;
} }
/* Potentially rehashes the table. */ /* Potentially rehashes the table. */
~Enum() { ~Enum() {
skipping to change at line 532 skipping to change at line 527
for (Entry *e = table, *end = table + tableCapacity; e != end; ++e) for (Entry *e = table, *end = table + tableCapacity; e != end; ++e)
*e = Entry(); *e = Entry();
removedCount = 0; removedCount = 0;
entryCount = 0; entryCount = 0;
} }
Range all() const { Range all() const {
return Range(table, table + tableCapacity); return Range(table, table + tableCapacity);
} }
typename Enum::Init enumerate() {
return typename Enum::Init(all(), *this);
}
bool empty() const { bool empty() const {
return !entryCount; return !entryCount;
} }
uint32 count() const{ uint32 count() const{
return entryCount; return entryCount;
} }
uint32 generation() const { uint32 generation() const {
return gen; return gen;
skipping to change at line 718 skipping to change at line 709
private: private:
/* Implement HashMap using HashTable. Lift |Key| operations to |Entry|. */ /* Implement HashMap using HashTable. Lift |Key| operations to |Entry|. */
struct MapHashPolicy : HashPolicy struct MapHashPolicy : HashPolicy
{ {
typedef Key KeyType; typedef Key KeyType;
static const Key &getKey(Entry &e) { return e.key; } static const Key &getKey(Entry &e) { return e.key; }
}; };
typedef detail::HashTable<Entry, MapHashPolicy, AllocPolicy> Impl; typedef detail::HashTable<Entry, MapHashPolicy, AllocPolicy> Impl;
friend class Impl::Enum;
/* Not implicitly copyable (expensive). May add explicit |clone| later. */ /* Not implicitly copyable (expensive). May add explicit |clone| later. */
HashMap(const HashMap &); HashMap(const HashMap &);
HashMap &operator=(const HashMap &); HashMap &operator=(const HashMap &);
Impl impl; Impl impl;
public: public:
/* /*
* HashMap construction is fallible (due to OOM); thus the user must ca ll * HashMap construction is fallible (due to OOM); thus the user must ca ll
* init after constructing a HashMap and check the return value. * init after constructing a HashMap and check the return value.
skipping to change at line 788 skipping to change at line 781
* for (HM::Range r = h.all(); !r.empty(); r.popFront()) * for (HM::Range r = h.all(); !r.empty(); r.popFront())
* char c = r.front().value; * char c = r.front().value;
* *
* Also see the definition of Range in HashTable above (with T = Entry) . * Also see the definition of Range in HashTable above (with T = Entry) .
*/ */
typedef typename Impl::Range Range; typedef typename Impl::Range Range;
Range all() const { return impl.all(); } Range all() const { return impl.all(); }
size_t count() const { return impl.count() ; } size_t count() const { return impl.count() ; }
/* /*
 * Returns a value that may be used to initialize an Enum. An Enum may be * Typedef for the enumeration class. An Enum may be used to examine and
 * used to examine and remove table entries: * remove table entries:
* *
* typedef HashMap<int,char> HM; * typedef HashMap<int,char> HM;
* HM s; * HM s;
* for (HM::Enum e(s.enumerate()); !e.empty(); e.popFront()) * for (HM::Enum e(s); !e.empty(); e.popFront())
* if (e.front().value == 'l') * if (e.front().value == 'l')
* e.removeFront(); * e.removeFront();
* *
* Table resize may occur in Enum's destructor. Also see the definition of * Table resize may occur in Enum's destructor. Also see the definition of
* Enum in HashTable above (with T = Entry). * Enum in HashTable above (with T = Entry).
*/ */
typedef typename Impl::Enum Enum; typedef typename Impl::Enum Enum;
typename Enum::Init enumerate() { return impl.enumerate(); }
/* Remove all entries. */ /* Remove all entries. */
void clear() { impl.clear(); } void clear() { impl.clear(); }
/* Does the table contain any entries? */ /* Does the table contain any entries? */
bool empty() const { return impl.empty() ; } bool empty() const { return impl.empty() ; }
/* /*
* If |generation()| is the same before and after a HashMap operation, * If |generation()| is the same before and after a HashMap operation,
* pointers into the table remain valid. * pointers into the table remain valid.
skipping to change at line 864 skipping to change at line 856
{ {
typedef typename HashPolicy::Lookup Lookup; typedef typename HashPolicy::Lookup Lookup;
/* Implement HashSet in terms of HashTable. */ /* Implement HashSet in terms of HashTable. */
struct SetOps : HashPolicy { struct SetOps : HashPolicy {
typedef T KeyType; typedef T KeyType;
static const KeyType &getKey(const T &t) { return t; } static const KeyType &getKey(const T &t) { return t; }
}; };
typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl; typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
friend class Impl::Enum;
/* Not implicitly copyable (expensive). May add explicit |clone| later. */ /* Not implicitly copyable (expensive). May add explicit |clone| later. */
HashSet(const HashSet &); HashSet(const HashSet &);
HashSet &operator=(const HashSet &); HashSet &operator=(const HashSet &);
Impl impl; Impl impl;
public: public:
/* /*
* HashSet construction is fallible (due to OOM); thus the user must ca ll