00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021
00022 #include "vm_insnhelper.h"
00023 #include "vm_insnhelper.c"
00024 #include "vm_exec.h"
00025 #include "vm_exec.c"
00026
00027 #include "vm_method.c"
00028 #include "vm_eval.c"
00029
00030 #include <assert.h>
00031
00032 #define BUFSIZE 0x100
00033 #define PROCDEBUG 0
00034
00035 VALUE rb_cRubyVM;
00036 VALUE rb_cThread;
00037 VALUE rb_cEnv;
00038 VALUE rb_mRubyVMFrozenCore;
00039
00040 VALUE ruby_vm_const_missing_count = 0;
00041
00042 char ruby_vm_redefined_flag[BOP_LAST_];
00043
00044 rb_thread_t *ruby_current_thread = 0;
00045 rb_vm_t *ruby_current_vm = 0;
00046
00047 static void thread_free(void *ptr);
00048
00049 void vm_analysis_operand(int insn, int n, VALUE op);
00050 void vm_analysis_register(int reg, int isset);
00051 void vm_analysis_insn(int insn);
00052
00053
00054
00055
00056
00057
00058
00059 RUBY_FUNC_EXPORTED VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
00060 RUBY_FUNC_EXPORTED int rb_vm_get_sourceline(const rb_control_frame_t *cfp);
00061
/* Bump the VM-global state version, invalidating caches keyed on it
 * (e.g. after a method or constant redefinition). */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00067
00068 static void vm_clear_global_method_cache(void);
00069
/* Clear every inline method cache.  Intentionally a no-op in this
 * implementation: inline caches are invalidated via the global state
 * version instead (see rb_vm_change_state). */
static void
vm_clear_all_inline_method_cache(void)
{
    /* nothing to do */
}
00078
00079 static void
00080 vm_clear_all_cache()
00081 {
00082 vm_clear_global_method_cache();
00083 vm_clear_all_inline_method_cache();
00084 ruby_vm_global_state_version = 1;
00085 }
00086
00087 void
00088 rb_vm_inc_const_missing_count(void)
00089 {
00090 ruby_vm_const_missing_count +=1;
00091 }
00092
00093
00094
/* Push a FINISH frame onto th's control-frame stack and aim its pc at
 * the special finish instruction sequence; vm_exec() stops when control
 * returns to this frame.  Always returns Qtrue. */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00104
/* Prepare th to execute a toplevel iseq: push a FINISH frame followed
 * by a TOP frame running with self == th->top_self.
 * Raises TypeError when iseqval is not a toplevel InstructionSequence. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    /* ensure the new frame's locals + operand stack fit */
    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00124
/* Prepare th to execute an eval'ed iseq inside th->base_block: push
 * FINISH + EVAL frames that share the block's self/lfp and chain their
 * dfp to the block's dfp.  When cref is given it is stored in the
 * dfp[-1] special slot so lexical lookups use it. */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;  /* special slot: cref override */
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00144
/* Prepare th to execute the main script.  The script runs as an eval
 * under TOPLEVEL_BINDING's environment, so toplevel `eval` shares the
 * script's locals; when the script introduces locals, the binding's env
 * is rebuilt to include them. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding (refresh the env if the script defines locals) */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00167
00168 rb_control_frame_t *
00169 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00170 {
00171 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00172 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00173 return cfp;
00174 }
00175 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00176 }
00177 return 0;
00178 }
00179
/* Find the Ruby-level caller of cfp.  If cfp itself is Ruby-level it is
 * returned.  Otherwise the search walks outward, but only continues
 * through frames carrying VM_FRAME_FLAG_PASSED; it gives up (returns 0)
 * at the first unflagged non-Ruby frame or at stack bottom. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;  /* a genuine C frame: no Ruby-level caller here */
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00201
00202
00203
/* Register func to run at VM shutdown.  The raw function pointer is
 * pushed onto the vm->at_exit array; hooks run LIFO in
 * ruby_vm_run_at_exit_hooks(). */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00209
/* Pop and invoke every hook registered via ruby_vm_at_exit() (LIFO
 * order), then free the backing array. */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        /* entries are raw function pointers stored as VALUEs */
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
/* True when the dfp `env` does NOT point inside th's VM stack, i.e. the
 * environment has already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env) \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
/* A heap-resident environment stores its Env VALUE just past the dfp slot. */
#define ENV_VAL(env) ((env)[1])
00236
/* GC mark callback for Env objects: marks the copied local slots, the
 * previous env in the chain, and the block's self/proc/iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* mark the copied locals (conservative location marking) */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may actually hold a NODE (C-implemented block) */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00266
/* GC free callback for Env objects: release the copied locals array,
 * then the struct itself. */
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        rb_env_t *const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
00278
00279 static size_t
00280 env_memsize(const void *ptr)
00281 {
00282 if (ptr) {
00283 const rb_env_t * const env = ptr;
00284 size_t size = sizeof(rb_env_t);
00285 if (env->env) {
00286 size += env->env_size * sizeof(VALUE);
00287 }
00288 return size;
00289 }
00290 return 0;
00291 }
00292
/* TypedData descriptor wiring the GC callbacks for Env objects. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00297
/* Allocate a fresh Env wrapper object with all fields cleared. */
static VALUE
env_alloc(void)
{
    VALUE obj;
    rb_env_t *env;
    obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
    env->env = 0;
    env->prev_envval = 0;
    env->block.iseq = 0;
    return obj;
}
00309
00310 static VALUE check_env_value(VALUE envval);
00311
/* Debug helper (PROCDEBUG): dump an env's dfp slots and recursively
 * verify the previous-env chain via check_env_value().  Always returns 1.
 * NOTE(review): the slot labels below mirror the heap env layout around
 * block.dfp — confirm against vm_make_env_each before relying on them. */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp: %10p\n", (void *)env->block.lfp);
    printf("dfp: %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        /* recurse into the previous env */
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00332
/* Debug helper: run check_env() on envval; rb_bug() on failure.
 * Returns envval on success. */
static VALUE
check_env_value(VALUE envval)
{
    rb_env_t *env;
    GetEnvPtr(envval, env);

    if (check_env(env)) {
        return envval;
    }
    rb_bug("invalid env");
    return Qnil;  /* unreachable */
}
00345
/* Move one environment (the locals addressed by envptr, owned by cfp)
 * from the VM stack into a heap Env object, first moving its parent
 * environments recursively.  envptr/endptr are the frame's dfp/lfp;
 * equality means the frame owns its local frame directly.  Rewrites the
 * stack slot *envptr to the new Env VALUE and returns it. */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already heapified: its Env sits in the slot next to the dfp */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* block frame: heapify the enclosing environment first */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* locate the (older) control frame owning the parent env */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate the Env and copy the locals into it */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;  /* non-iseq frame: only the special slots */
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* locals + the dfp slot itself, plus 2 extra slots (envval/penvval) */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear the stale stack slot for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;  /* keep the Env reachable from the stack slot */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;   /* ENV_VAL slot of the heap copy */
    nenvptr[2] = penvval;  /* parent Env */

    /* re-point the frame at the heap copy */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* record the frame as the env's block */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* C-level frame: the env's block must not look callable */
        env->block.iseq = 0;
    } else {
        /* fix throw objects that still point at the stack */
        rb_vm_rewrite_dfp_in_errinfo(th, cfp);
    }
    return envval;
}
00432
00433 static int
00434 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00435 {
00436 int i;
00437 if (!iseq) return 0;
00438 for (i = 0; i < iseq->local_table_size; i++) {
00439 ID lid = iseq->local_table[i];
00440 if (rb_is_local_id(lid)) {
00441 rb_ary_push(ary, ID2SYM(lid));
00442 }
00443 }
00444 return 1;
00445 }
00446
00447 static int
00448 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00449 {
00450
00451 while (collect_local_variables_in_iseq(env->block.iseq, ary),
00452 env->prev_envval) {
00453 GetEnvPtr(env->prev_envval, env);
00454 }
00455 return 0;
00456 }
00457
00458 static int
00459 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00460 {
00461 if (ENV_IN_HEAP_P(th, dfp)) {
00462 rb_env_t *env;
00463 GetEnvPtr(ENV_VAL(dfp), env);
00464 collect_local_variables_in_env(env, ary);
00465 return 1;
00466 }
00467 else {
00468 return 0;
00469 }
00470 }
00471
/* Public entry point: move cfp's environment (and its parents) off the
 * VM stack into heap Env objects and return the resulting Env VALUE.
 * A FINISH frame has no environment of its own, so it is skipped. */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* use the frame beneath the FINISH marker */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00490
/* For RESCUE/ENSURE frames: the errinfo slot (dfp[-2]) may hold a THROW
 * node whose catch point is a dfp still aimed at the VM stack.  If that
 * stack slot now holds an Env (the environment was heapified), redirect
 * the catch point into the heap copy so break/return unwinding keeps
 * matching after the stack is gone. */
void
rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
        (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
         cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
        VALUE errinfo = cfp->dfp[-2];
        if (RB_TYPE_P(errinfo, T_NODE)) {
            VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(errinfo);
            if (! ENV_IN_HEAP_P(th, escape_dfp)) {
                VALUE dfpval = *escape_dfp;
                if (CLASS_OF(dfpval) == rb_cEnv) {
                    /* point at the dfp inside the heap env instead */
                    rb_env_t *dfpenv;
                    GetEnvPtr(dfpval, dfpenv);
                    SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(dfpenv->env + dfpenv->local_size));
                }
            }
        }
    }
}
00512
/* Heapify the environment of every Ruby-level frame on th's stack
 * (walking outward from the current frame). */
void
rb_vm_stack_to_heap(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
        rb_vm_make_env_object(th, cfp);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00522
00523
00524
00525 static VALUE
00526 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00527 {
00528 if (!block->proc) {
00529 block->proc = rb_vm_make_proc(th, block, rb_cProc);
00530 }
00531 return block->proc;
00532 }
00533
/* Create a Proc (of class `klass`) from `block`.  The block's frame and
 * any block it was itself given are heapified first so the Proc stays
 * valid after the frames are popped.  rb_bug()s if the block already
 * has a Proc (use vm_make_proc_from_block for the cached path). */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        /* the frame was itself passed a block: wrap it in a Proc and
         * re-point lfp[0] at the Proc's heap-resident block */
        rb_proc_t *p;

        blockprocval = vm_make_proc_from_block(
            th, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

        GetProcPtr(blockprocval, p);
        *cfp->lfp = GC_GUARDED_PTR(&p->block);
    }

    envval = rb_vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* after heapification the block must not point into the stack */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00582
00583
00584
/* Invoke `block` from C with argc/argv.  Three cases:
 *  - block->iseq is a special const: no block body -> Qnil;
 *  - block->iseq is a real iseq: push FINISH + BLOCK (or LAMBDA) frames,
 *    bind the arguments, and run the interpreter loop;
 *  - block->iseq is a NODE: a C-implemented (ifunc) block.
 * `blockptr` is the block passed *to* this invocation; `cref` (if any)
 * is stored in the dfp[-1] special slot. */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        /* lambdas get strict argument semantics via the frame magic */
        int type = block_proc_is_lambda(block->proc) ?
            VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* push the arguments onto the value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00630
/* Return the block given to the current frame, raising LocalJumpError
 * ("no block given") when there is none. */
static inline const rb_block_t *
check_block(rb_thread_t *th)
{
    const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (blockptr == 0) {
        rb_vm_localjump_error("no block given", Qnil, 0);
    }

    return blockptr;
}
00642
/* Yield to the current frame's block with an explicit cref (used by
 * eval-like callers that must override lexical scope). */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
00649
/* Yield argc/argv to the block given to the current frame. */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
00656
/* Call `proc` with the given self/args/block.  Runs under the proc's
 * saved $SAFE level (unless the proc came from a method), restoring the
 * caller's level afterwards, and re-raises any non-local jump that
 * escapes the call. */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;  /* volatile: survives longjmp */

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);  /* propagate break/return/raise */
    }
    return val;
}
00683
00684
00685
00686 static rb_control_frame_t *
00687 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
00688 {
00689 while (cfp->pc == 0) {
00690 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00691 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00692 return 0;
00693 }
00694 }
00695 return cfp;
00696 }
00697
/* Read special variable `key` (0 = $_, 1 = $~ per the accessors below)
 * from the nearest real frame at or below cfp. */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
00704
/* Write special variable `key` in the nearest real frame at or below
 * cfp. */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
00711
/* Read special variable `key` for the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00718
/* Write special variable `key` for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00725
/* Get $~ (last MatchData); svar slot 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00731
/* Set $~ (last MatchData); svar slot 1. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00737
/* Get $_ (last read line); svar slot 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00743
/* Set $_ (last read line); svar slot 0. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00749
00750
00751
/* Map cfp->pc back to a source line number via the iseq's insn_info
 * table.  Returns 0 when the frame has no normal iseq, no line info,
 * or the pc sits at the very first table entry. */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        /* pc at the first entry: before any instruction, line stays 0 */
        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                /* pos begins entry i, so it executes entry i-1's line */
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* past the last recorded position: use the final line */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00774
/* Iterate the backtrace of th, oldest frame first, starting `lev`
 * frames up from the stack bottom.  `init` (if given) is called once
 * before iteration; `iter` is called with (arg, file, line, name) for
 * each Ruby frame and each named C-function frame, and may stop the
 * walk by returning non-zero.  Returns FALSE when lev exceeds the
 * stack depth, TRUE otherwise. */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    cfp -= 2;  /* skip the two dummy frames at the stack bottom */
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;  /* lev deeper than the stack */
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            /* C function frame: report it under the last known file/line */
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00818
00819 static void
00820 vm_backtrace_alloc(void *arg)
00821 {
00822 VALUE *aryp = arg;
00823 *aryp = rb_ary_new();
00824 }
00825
/* iter callback for vm_backtrace_each: append "file:line:in `name'"
 * (or "file:in `name'" when line is 0) to the array at *arg.  Always
 * returns 0 so the walk continues. */
static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
00843
/* Build a backtrace array for th, skipping `lev` frames.  For lev < 0
 * the array is pre-created, so an out-of-range level yields [] instead
 * of nil.  The walk collects oldest-first; the result is reversed to
 * newest-first. */
static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}
00856
00857 const char *
00858 rb_sourcefile(void)
00859 {
00860 rb_thread_t *th = GET_THREAD();
00861 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00862
00863 if (cfp) {
00864 return RSTRING_PTR(cfp->iseq->filename);
00865 }
00866 else {
00867 return 0;
00868 }
00869 }
00870
00871 int
00872 rb_sourceline(void)
00873 {
00874 rb_thread_t *th = GET_THREAD();
00875 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00876
00877 if (cfp) {
00878 return rb_vm_get_sourceline(cfp);
00879 }
00880 else {
00881 return 0;
00882 }
00883 }
00884
/* Return the cref (lexical scope chain) of the nearest Ruby-level
 * frame; raises RuntimeError when no such frame exists. */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00896
#if 0
/* Debug helper (compiled out): dump each cref's class and visibility
 * along the chain. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00908
/* Return the cbase (innermost lexical class/module) of the nearest
 * Ruby-level frame; raises RuntimeError when no such frame exists. */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
00920
00921
00922
/* Build a LocalJumpError whose @reason symbol reflects the jump tag and
 * whose @exit_value carries the jumped value.  The switch is kept as
 * literal CONST_ID calls because CONST_ID caches per call site and
 * needs a string literal. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00954
00955 void
00956 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00957 {
00958 VALUE exc = make_localjump_error(mesg, value, reason);
00959 rb_exc_raise(exc);
00960 }
00961
/* Translate a jump tag that escaped to the outermost level into a
 * LocalJumpError exception object.  Qundef means "use the value stored
 * in the current tag".  Returns Qnil for state 0 and unknown states
 * (callers treat nil as "re-throw the tag"). */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
00993
/* Raise a LocalJumpError for `state` when a value accompanies the jump
 * (val != Qnil); otherwise re-throw the raw tag. */
void
rb_vm_jump_tag_but_local_jump(int state, VALUE val)
{
    if (val != Qnil) {
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
        rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}
01003
NORETURN(static void vm_iter_break(rb_thread_t *th));

/* Throw a `break` out of the current iterator: build a THROW object
 * targeting the enclosing dfp and longjmp with TAG_BREAK. */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01016
/* Public API: break out of the current iterator (does not return). */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
01022
01023
01024
01025 static st_table *vm_opt_method_table = 0;
01026
/* Called when a method entry is (re)defined: if `me` is one of the
 * optimized basic operations registered in vm_opt_method_table
 * (Fixnum#+ etc.), set its redefined flag so specialized instructions
 * fall back to a real dispatch. */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            ruby_vm_redefined_flag[bop] = 1;
        }
    }
}
01037
/* Register klass#mid as the canonical C implementation of basic
 * operation `bop`, keyed by its method entry so redefinition can be
 * detected later.  rb_bug()s if the method is missing or not a cfunc. */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01050
/* Initialize the basic-operation redefinition machinery: clear each
 * flag and register the built-in C implementations that the
 * specialized VM instructions assume. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

    /* OP clears the flag for a basic operation; C registers one class's
     * built-in implementation of it. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01081
01082
01083
#if VMDEBUG
/* Human-readable name for a control frame's magic type (debug output);
 * rb_bug()s on an unknown frame type. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK: return "block";
      case VM_FRAME_MAGIC_CLASS: return "class";
      case VM_FRAME_MAGIC_TOP: return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC: return "cfunc";
      case VM_FRAME_MAGIC_PROC: return "proc";
      case VM_FRAME_MAGIC_IFUNC: return "ifunc";
      case VM_FRAME_MAGIC_EVAL: return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
01132
01133
01134
01135
01136
01137
01138
01139
01140
01141
01142
01143
01144
01145
01146
01147
01148
01149
01150
01151
01152
01153
01154
01155
01156
01157
01158
01159
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171
01172
01173
01174
01175
01176
01177
01178
01179
01180
01181
01182
01183
01184
01185
01186
01187
01188
01189
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208 static VALUE
01209 vm_exec(rb_thread_t *th)
01210 {
01211 int state;
01212 VALUE result, err;
01213 VALUE initial = 0;
01214 VALUE *escape_dfp = NULL;
01215
01216 TH_PUSH_TAG(th);
01217 _tag.retval = Qnil;
01218 if ((state = EXEC_TAG()) == 0) {
01219 vm_loop_start:
01220 result = vm_exec_core(th, initial);
01221 if ((state = th->state) != 0) {
01222 err = result;
01223 th->state = 0;
01224 goto exception_handler;
01225 }
01226 }
01227 else {
01228 int i;
01229 struct iseq_catch_table_entry *entry;
01230 unsigned long epc, cont_pc, cont_sp;
01231 VALUE catch_iseqval;
01232 rb_control_frame_t *cfp;
01233 VALUE type;
01234
01235 err = th->errinfo;
01236
01237 exception_handler:
01238 cont_pc = cont_sp = catch_iseqval = 0;
01239
01240 while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
01241 if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
01242 const rb_method_entry_t *me = th->cfp->me;
01243 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
01244 }
01245 th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
01246 }
01247
01248 cfp = th->cfp;
01249 epc = cfp->pc - cfp->iseq->iseq_encoded;
01250
01251 if (state == TAG_BREAK || state == TAG_RETURN) {
01252 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
01253
01254 if (cfp->dfp == escape_dfp) {
01255 if (state == TAG_RETURN) {
01256 if ((cfp + 1)->pc != &finish_insn_seq[0]) {
01257 SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
01258 SET_THROWOBJ_STATE(err, state = TAG_BREAK);
01259 }
01260 else {
01261 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01262 entry = &cfp->iseq->catch_table[i];
01263 if (entry->start < epc && entry->end >= epc) {
01264 if (entry->type == CATCH_TYPE_ENSURE) {
01265 catch_iseqval = entry->iseq;
01266 cont_pc = entry->cont;
01267 cont_sp = entry->sp;
01268 break;
01269 }
01270 }
01271 }
01272 if (!catch_iseqval) {
01273 result = GET_THROWOBJ_VAL(err);
01274 th->errinfo = Qnil;
01275 th->cfp += 2;
01276 goto finish_vme;
01277 }
01278 }
01279
01280 }
01281 else {
01282
01283 #if OPT_STACK_CACHING
01284 initial = (GET_THROWOBJ_VAL(err));
01285 #else
01286 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
01287 #endif
01288 th->errinfo = Qnil;
01289 goto vm_loop_start;
01290 }
01291 }
01292 }
01293
01294 if (state == TAG_RAISE) {
01295 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01296 entry = &cfp->iseq->catch_table[i];
01297 if (entry->start < epc && entry->end >= epc) {
01298
01299 if (entry->type == CATCH_TYPE_RESCUE ||
01300 entry->type == CATCH_TYPE_ENSURE) {
01301 catch_iseqval = entry->iseq;
01302 cont_pc = entry->cont;
01303 cont_sp = entry->sp;
01304 break;
01305 }
01306 }
01307 }
01308 }
01309 else if (state == TAG_RETRY) {
01310 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01311 entry = &cfp->iseq->catch_table[i];
01312 if (entry->start < epc && entry->end >= epc) {
01313
01314 if (entry->type == CATCH_TYPE_ENSURE) {
01315 catch_iseqval = entry->iseq;
01316 cont_pc = entry->cont;
01317 cont_sp = entry->sp;
01318 break;
01319 }
01320 else if (entry->type == CATCH_TYPE_RETRY) {
01321 VALUE *escape_dfp;
01322 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
01323 if (cfp->dfp == escape_dfp) {
01324 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
01325 th->errinfo = Qnil;
01326 goto vm_loop_start;
01327 }
01328 }
01329 }
01330 }
01331 }
01332 else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
01333 type = CATCH_TYPE_BREAK;
01334
01335 search_restart_point:
01336 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01337 entry = &cfp->iseq->catch_table[i];
01338
01339 if (entry->start < epc && entry->end >= epc) {
01340 if (entry->type == CATCH_TYPE_ENSURE) {
01341 catch_iseqval = entry->iseq;
01342 cont_pc = entry->cont;
01343 cont_sp = entry->sp;
01344 break;
01345 }
01346 else if (entry->type == type) {
01347 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
01348 cfp->sp = cfp->bp + entry->sp;
01349
01350 if (state != TAG_REDO) {
01351 #if OPT_STACK_CACHING
01352 initial = (GET_THROWOBJ_VAL(err));
01353 #else
01354 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
01355 #endif
01356 }
01357 th->errinfo = Qnil;
01358 goto vm_loop_start;
01359 }
01360 }
01361 }
01362 }
01363 else if (state == TAG_REDO) {
01364 type = CATCH_TYPE_REDO;
01365 goto search_restart_point;
01366 }
01367 else if (state == TAG_NEXT) {
01368 type = CATCH_TYPE_NEXT;
01369 goto search_restart_point;
01370 }
01371 else {
01372 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01373 entry = &cfp->iseq->catch_table[i];
01374 if (entry->start < epc && entry->end >= epc) {
01375
01376 if (entry->type == CATCH_TYPE_ENSURE) {
01377 catch_iseqval = entry->iseq;
01378 cont_pc = entry->cont;
01379 cont_sp = entry->sp;
01380 break;
01381 }
01382 }
01383 }
01384 }
01385
01386 if (catch_iseqval != 0) {
01387
01388 rb_iseq_t *catch_iseq;
01389
01390
01391 GetISeqPtr(catch_iseqval, catch_iseq);
01392 cfp->sp = cfp->bp + cont_sp;
01393 cfp->pc = cfp->iseq->iseq_encoded + cont_pc;
01394
01395
01396 cfp->sp[0] = err;
01397 vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
01398 cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
01399 cfp->sp + 1 , cfp->lfp, catch_iseq->local_size - 1);
01400
01401 state = 0;
01402 th->state = 0;
01403 th->errinfo = Qnil;
01404 goto vm_loop_start;
01405 }
01406 else {
01407
01408
01409 switch (VM_FRAME_TYPE(th->cfp)) {
01410 case VM_FRAME_MAGIC_METHOD:
01411 EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
01412 break;
01413 case VM_FRAME_MAGIC_CLASS:
01414 EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
01415 break;
01416 }
01417
01418 th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
01419
01420 if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
01421 goto exception_handler;
01422 }
01423 else {
01424 vm_pop_frame(th);
01425 th->errinfo = err;
01426 TH_POP_TAG2();
01427 JUMP_TAG(state);
01428 }
01429 }
01430 }
01431 finish_vme:
01432 TH_POP_TAG();
01433 return result;
01434 }
01435
01436
01437
01438 VALUE
01439 rb_iseq_eval(VALUE iseqval)
01440 {
01441 rb_thread_t *th = GET_THREAD();
01442 VALUE val;
01443 volatile VALUE tmp;
01444
01445 vm_set_top_stack(th, iseqval);
01446
01447 val = vm_exec(th);
01448 tmp = iseqval;
01449 return val;
01450 }
01451
01452 VALUE
01453 rb_iseq_eval_main(VALUE iseqval)
01454 {
01455 rb_thread_t *th = GET_THREAD();
01456 VALUE val;
01457 volatile VALUE tmp;
01458
01459 vm_set_main_stack(th, iseqval);
01460
01461 val = vm_exec(th);
01462 tmp = iseqval;
01463 return val;
01464 }
01465
01466 int
01467 rb_thread_method_id_and_class(rb_thread_t *th,
01468 ID *idp, VALUE *klassp)
01469 {
01470 rb_control_frame_t *cfp = th->cfp;
01471 rb_iseq_t *iseq = cfp->iseq;
01472 if (!iseq && cfp->me) {
01473 if (idp) *idp = cfp->me->def->original_id;
01474 if (klassp) *klassp = cfp->me->klass;
01475 return 1;
01476 }
01477 while (iseq) {
01478 if (RUBY_VM_IFUNC_P(iseq)) {
01479 if (idp) CONST_ID(*idp, "<ifunc>");
01480 if (klassp) *klassp = 0;
01481 return 1;
01482 }
01483 if (iseq->defined_method_id) {
01484 if (idp) *idp = iseq->defined_method_id;
01485 if (klassp) *klassp = iseq->klass;
01486 return 1;
01487 }
01488 if (iseq->local_iseq == iseq) {
01489 break;
01490 }
01491 iseq = iseq->parent_iseq;
01492 }
01493 return 0;
01494 }
01495
01496 int
01497 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
01498 {
01499 return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
01500 }
01501
01502 VALUE
01503 rb_thread_current_status(const rb_thread_t *th)
01504 {
01505 const rb_control_frame_t *cfp = th->cfp;
01506 VALUE str = Qnil;
01507
01508 if (cfp->iseq != 0) {
01509 if (cfp->pc != 0) {
01510 rb_iseq_t *iseq = cfp->iseq;
01511 int line_no = rb_vm_get_sourceline(cfp);
01512 char *file = RSTRING_PTR(iseq->filename);
01513 str = rb_sprintf("%s:%d:in `%s'",
01514 file, line_no, RSTRING_PTR(iseq->name));
01515 }
01516 }
01517 else if (cfp->me->def->original_id) {
01518 str = rb_sprintf("`%s#%s' (cfunc)",
01519 rb_class2name(cfp->me->klass),
01520 rb_id2name(cfp->me->def->original_id));
01521 }
01522
01523 return str;
01524 }
01525
01526 VALUE
01527 rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
01528 const rb_block_t *blockptr, VALUE filename)
01529 {
01530 rb_thread_t *th = GET_THREAD();
01531 const rb_control_frame_t *reg_cfp = th->cfp;
01532 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
01533 VALUE val;
01534
01535 vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
01536 recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);
01537
01538 val = (*func)(arg);
01539
01540 vm_pop_frame(th);
01541 return val;
01542 }
01543
01544
01545
01546 static int
01547 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01548 {
01549 VALUE thval = (VALUE)key;
01550 rb_gc_mark(thval);
01551 return ST_CONTINUE;
01552 }
01553
01554 static void
01555 mark_event_hooks(rb_event_hook_t *hook)
01556 {
01557 while (hook) {
01558 rb_gc_mark(hook->data);
01559 hook = hook->next;
01560 }
01561 }
01562
/*
 * GC mark function for the VM object: marks every Ruby object the
 * rb_vm_t structure keeps alive (living threads, load path, special
 * exceptions, trap handlers, event-hook data, etc.).
 */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
	rb_vm_t *vm = ptr;
	/* mark each living thread object (keys of the table) */
	if (vm->living_threads) {
	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
	}
	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
	RUBY_MARK_UNLESS_NULL(vm->load_path);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
	RUBY_MARK_UNLESS_NULL(vm->top_self);
	RUBY_MARK_UNLESS_NULL(vm->coverages);
	/* pre-built exception objects (NoMemoryError etc.) */
	rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

	if (vm->loading_table) {
	    rb_mark_tbl(vm->loading_table);
	}

	mark_event_hooks(vm->event_hooks);

	/* signal trap handler procs/commands */
	for (i = 0; i < RUBY_NSIG; i++) {
	    if (vm->trap_list[i].cmd)
		rb_gc_mark(vm->trap_list[i].cmd);
	}
    }

    RUBY_MARK_LEAVE("vm");
}
01597
/* no dfree for the VM wrapper object; teardown goes through ruby_vm_destruct */
#define vm_free 0
01599
/*
 * Tear down a VM at process exit: free the main thread, the living
 * thread table, the object space (when per-VM object spaces are
 * enabled), run at_exit hooks, destroy the GVL and free the rb_vm_t
 * itself.  Order matters: hooks run before the struct is freed.
 * Always returns 0.
 */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
	rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	struct rb_objspace *objspace = vm->objspace;
#endif
	/* the VM wrapper object must not be finalized later by the GC */
	rb_gc_force_recycle(vm->self);
	vm->main_thread = 0;
	if (th) {
	    rb_fiber_reset_root_local_storage(th->self);
	    thread_free(th);
	}
	if (vm->living_threads) {
	    st_free_table(vm->living_threads);
	    vm->living_threads = 0;
	}
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	if (objspace) {
	    rb_objspace_free(objspace);
	}
#endif
	/* run registered at_exit hooks before releasing the VM struct */
	ruby_vm_run_at_exit_hooks(vm);
	rb_vm_gvl_destroy(vm);
	ruby_xfree(vm);
	ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01632
01633 static size_t
01634 vm_memsize(const void *ptr)
01635 {
01636 if (ptr) {
01637 const rb_vm_t *vmobj = ptr;
01638 return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01639 }
01640 else {
01641 return 0;
01642 }
01643 }
01644
/* TypedData hooks for the RubyVM wrapper object (no free: see vm_free). */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01649
/*
 * Zero a freshly allocated rb_vm_t and set its non-zero defaults.
 */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    /* at_exit is a bare embedded T_ARRAY header (embedded flag set,
     * embedded length bits cleared -> length 0); klass 0 hides it. */
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK;
    vm->at_exit.basic.klass = 0;
}
01658
01659
01660
/* Thread VM-stack recycling: freed stacks are kept in a small LIFO pool
 * and handed back to new threads, avoiding repeated large allocations. */
#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
#define RECYCLE_MAX 64
/* pool of released thread stacks awaiting reuse */
static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
static int thread_recycle_stack_count = 0;

/* Pop a recycled stack if one is available, else allocate a fresh one.
 * NOTE(review): assumes all thread stacks share the same size, so any
 * pooled stack fits any request — confirm RUBY_VM_THREAD_STACK_SIZE is
 * the only size ever passed. */
static VALUE *
thread_recycle_stack(size_t size)
{
    if (thread_recycle_stack_count) {
	return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
    else {
	return ALLOC_N(VALUE, size);
    }
}

#else
#define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
#endif
01682
/*
 * Return a thread's VM stack to the recycle pool, or free it outright
 * when the pool is full (or recycling is compiled out).
 */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
	thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
	return;
    }
#endif
    ruby_xfree(stack);
}
01694
#ifdef USE_THREAD_RECYCLE
/* Allocate a zeroed rb_thread_t outside the GC heap (used only when
 * thread-struct recycling is enabled at build time). */
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
#endif
01704
/*
 * GC mark function for a Thread object: marks the live portion of the
 * thread's VM stack, every control frame's references, and all Ruby
 * objects held directly in the rb_thread_t structure.
 */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    /* control frames grow downward from the top of the stack area */
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    /* mark the value-stack portion in use (bottom up to sp) */
	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    /* plus any extra slots reserved during a GC-sensitive operation */
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    /* walk every control frame up to the stack limit */
	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		rb_gc_mark(cfp->self);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		/* method entries are flagged, not marked recursively here */
		if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* mark ruby objects held in the thread structure itself */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
	RUBY_MARK_UNLESS_NULL(th->local_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	/* for suspended threads, scan their machine stack and saved
	 * registers conservatively (the running thread is scanned by
	 * the GC itself) */
	if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine_regs,
				 (VALUE *)(&th->machine_regs) +
				 sizeof(th->machine_regs) / sizeof(VALUE));
	}

	mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01767
/*
 * dfree callback for a Thread object: release the VM stack, local
 * storage and (unless this is the main thread) the rb_thread_t itself.
 * A thread holding mutexes at free time is a VM bug.
 */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
	th = ptr;

	/* when a root fiber exists, the fiber owns the stack, not us */
	if (!th->root_fiber) {
	    RUBY_FREE_UNLESS_NULL(th->stack);
	}

	if (th->locking_mutex != Qfalse) {
	    rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
	}
	if (th->keeping_mutexes != NULL) {
	    rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
	}

	if (th->local_storage) {
	    st_free_table(th->local_storage);
	}

	/* the main thread struct is freed by ruby_vm_destruct, not here */
	if (th->vm && th->vm->main_thread == th) {
	    RUBY_GC_INFO("main thread\n");
	}
	else {
#ifdef USE_SIGALTSTACK
	    if (th->altstack) {
		free(th->altstack);
	    }
#endif
	    ruby_xfree(ptr);
	}
	/* drop the cached current-thread pointer if it points at us */
	if (ruby_current_thread == th)
	    ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
01808
01809 static size_t
01810 thread_memsize(const void *ptr)
01811 {
01812 if (ptr) {
01813 const rb_thread_t *th = ptr;
01814 size_t size = sizeof(rb_thread_t);
01815
01816 if (!th->root_fiber) {
01817 size += th->stack_size * sizeof(VALUE);
01818 }
01819 if (th->local_storage) {
01820 size += st_memsize(th->local_storage);
01821 }
01822 return size;
01823 }
01824 else {
01825 return 0;
01826 }
01827 }
01828
/* TypedData hooks for Thread objects; the short local alias keeps the
 * definitions below readable while the symbol stays exported. */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
	rb_thread_mark,
	thread_free,
	thread_memsize,
    },
};
01838
01839 VALUE
01840 rb_obj_is_thread(VALUE obj)
01841 {
01842 if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
01843 return Qtrue;
01844 }
01845 else {
01846 return Qfalse;
01847 }
01848 }
01849
/*
 * Allocate a Thread object wrapping a zeroed rb_thread_t, either from
 * the recycle pool (when built with USE_THREAD_RECYCLE) or via the
 * normal TypedData allocator.
 */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01863
/*
 * Initialize the core fields of a thread: allocate its VM stack, point
 * cfp at the (empty) top of the control-frame area, and push a dummy
 * TOP frame so the thread always has a valid current frame.
 */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack for signal handlers (machine stack overflow detection) */
    th->altstack = malloc(ALT_STACK_SIZE);
#endif
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the end of the stack area */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
		  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
}
01887
01888 static VALUE
01889 ruby_thread_init(VALUE self)
01890 {
01891 rb_thread_t *th;
01892 rb_vm_t *vm = GET_THREAD()->vm;
01893 GetThreadPtr(self, th);
01894
01895 th_init(th, self);
01896 th->vm = vm;
01897
01898 th->top_wrapper = 0;
01899 th->top_self = rb_vm_top_self();
01900 return self;
01901 }
01902
01903 VALUE
01904 rb_thread_alloc(VALUE klass)
01905 {
01906 VALUE self = thread_alloc(klass);
01907 ruby_thread_init(self);
01908 return self;
01909 }
01910
/*
 * Implementation of method definition (the "define_method" core
 * primitive): bind instruction sequence +iseqval+ as method +id+ on
 * the class derived from +cref+ (or on obj's singleton class when
 * +is_singleton+ is set).  Visibility comes from the cref; module
 * functions additionally get a public singleton copy.
 */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
		 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    /* an iseq already bound to a class must be cloned before rebinding */
    if (miseq->klass) {
	iseqval = rb_iseq_clone(iseqval, 0);
	RB_GC_GUARD(iseqval);
	GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
	rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
	/* immediates have no singleton class to attach methods to */
	if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
	    rb_raise(rb_eTypeError,
		     "can't define singleton method \"%s\" for %s",
		     rb_id2name(id), rb_obj_classname(obj));
	}

	rb_check_frozen(obj);
	klass = rb_singleton_class(obj);
	noex = NOEX_PUBLIC;
    }

    /* stash the lexical cref into the iseq; calls through it are public */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also define a public copy on the singleton class */
    if (!is_singleton && noex == NOEX_MODFUNC) {
	rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01954
/* Evaluate +expr+ with the current thread's cfp temporarily rewound one
 * frame (so the expression sees its caller's frame, e.g. for cref
 * lookup), then restore it. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01959
/* FrozenCore#core_define_method: define an instance method from an iseq,
 * evaluated against the caller's frame.  Returns nil. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01968
/* FrozenCore#core_define_singleton_method: define a singleton method
 * from an iseq, evaluated against the caller's frame.  Returns nil. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01977
/* FrozenCore#core_set_method_alias: alias method sym2 as sym1 on cbase,
 * evaluated against the caller's frame.  Returns nil. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01986
/* FrozenCore#core_set_variable_alias: alias global variable sym2 as
 * sym1, evaluated against the caller's frame.  Returns nil. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01995
/* FrozenCore#core_undef_method: undefine method sym on cbase and bump
 * the global method-cache version.  Returns nil. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
	rb_undef(cbase, SYM2ID(sym));
	INC_VM_STATE_VERSION();
    });
    return Qnil;
}
02005
/*
 * FrozenCore#core_set_postexe: register an END { } block.  Builds a
 * proc from +iseqval+ bound to the caller's Ruby-level frame and hands
 * it to rb_set_end_proc to run at interpreter shutdown.  Returns nil.
 */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
	rb_iseq_t *blockiseq;
	rb_block_t *blockptr;
	rb_thread_t *th = GET_THREAD();
	rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
	VALUE proc;

	if (cfp == 0) {
	    rb_bug("m_core_set_postexe: unreachable");
	}

	GetISeqPtr(iseqval, blockiseq);

	/* install the END iseq as the frame's block, then reify a proc */
	blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
	blockptr->iseq = blockiseq;
	blockptr->proc = 0;

	proc = rb_vm_make_proc(th, blockptr, rb_cProc);
	rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02031
02032 extern VALUE *rb_gc_stack_start;
02033 extern size_t rb_gc_stack_maxsize;
02034 #ifdef __ia64
02035 extern VALUE *rb_gc_register_stack_start;
02036 #endif
02037
02038
02039
02040
02041 static VALUE
02042 sdr(void)
02043 {
02044 rb_vm_bugreport();
02045 return Qnil;
02046 }
02047
02048
/* RubyVM.NSDR (debug builds): return the native C backtrace of the
 * current thread as an array of strings (empty array when backtrace()
 * is unavailable).
 * NOTE(review): +trace+ is function-static, so concurrent calls would
 * share the buffer — presumably only used single-threaded; confirm. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
	rb_memerror();
    }

    for (i=0; i<n; i++) {
	rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms);
#endif
    return ary;
}
02072
02073 void
02074 Init_VM(void)
02075 {
02076 VALUE opts;
02077 VALUE klass;
02078 VALUE fcore;
02079
02080
02081 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02082 rb_undef_alloc_func(rb_cRubyVM);
02083 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02084
02085
02086 fcore = rb_class_new(rb_cBasicObject);
02087 RBASIC(fcore)->flags = T_ICLASS;
02088 klass = rb_singleton_class(fcore);
02089 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02090 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02091 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02092 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02093 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02094 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02095 rb_obj_freeze(fcore);
02096 rb_gc_register_mark_object(fcore);
02097 rb_mRubyVMFrozenCore = fcore;
02098
02099
02100 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02101 rb_undef_alloc_func(rb_cEnv);
02102 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02103
02104
02105 rb_cThread = rb_define_class("Thread", rb_cObject);
02106 rb_undef_alloc_func(rb_cThread);
02107
02108
02109 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02110 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02111 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02112 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02113
02114 #if OPT_DIRECT_THREADED_CODE
02115 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02116 #elif OPT_TOKEN_THREADED_CODE
02117 rb_ary_push(opts, rb_str_new2("token threaded code"));
02118 #elif OPT_CALL_THREADED_CODE
02119 rb_ary_push(opts, rb_str_new2("call threaded code"));
02120 #endif
02121
02122 #if OPT_STACK_CACHING
02123 rb_ary_push(opts, rb_str_new2("stack caching"));
02124 #endif
02125 #if OPT_OPERANDS_UNIFICATION
02126 rb_ary_push(opts, rb_str_new2("operands unification]"));
02127 #endif
02128 #if OPT_INSTRUCTIONS_UNIFICATION
02129 rb_ary_push(opts, rb_str_new2("instructions unification"));
02130 #endif
02131 #if OPT_INLINE_METHOD_CACHE
02132 rb_ary_push(opts, rb_str_new2("inline method cache"));
02133 #endif
02134 #if OPT_BLOCKINLINING
02135 rb_ary_push(opts, rb_str_new2("block inlining"));
02136 #endif
02137
02138
02139 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02140
02141
02142 #if VMDEBUG
02143 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02144 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02145 #else
02146 (void)sdr;
02147 (void)nsdr;
02148 #endif
02149
02150
02151 {
02152 rb_vm_t *vm = ruby_current_vm;
02153 rb_thread_t *th = GET_THREAD();
02154 VALUE filename = rb_str_new2("<main>");
02155 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02156 volatile VALUE th_self;
02157 rb_iseq_t *iseq;
02158
02159
02160 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02161
02162
02163 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02164 vm->main_thread = th;
02165 vm->running_thread = th;
02166 th->vm = vm;
02167 th->top_wrapper = 0;
02168 th->top_self = rb_vm_top_self();
02169 rb_thread_set_current(th);
02170
02171 vm->living_threads = st_init_numtable();
02172 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02173
02174 rb_gc_register_mark_object(iseqval);
02175 GetISeqPtr(iseqval, iseq);
02176 th->cfp->iseq = iseq;
02177 th->cfp->pc = iseq->iseq_encoded;
02178 th->cfp->self = th->top_self;
02179
02180
02181
02182
02183 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02184 }
02185 vm_init_redefined_flag();
02186 }
02187
02188 void
02189 rb_vm_set_progname(VALUE filename)
02190 {
02191 rb_thread_t *th = GET_VM()->main_thread;
02192 rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
02193 --cfp;
02194 cfp->iseq->filename = filename;
02195 }
02196
02197 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02198 struct rb_objspace *rb_objspace_alloc(void);
02199 #endif
02200
/*
 * Phase 1 of VM bootstrap: allocate and zero the rb_vm_t and the main
 * rb_thread_t, create the object space and native-thread machinery.
 * Ruby objects do not exist yet; Init_VM wraps these structs later.
 */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
	fprintf(stderr, "[FATAL] failed to allocate memory\n");
	exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    /* no GC-visible thread object yet: set the raw pointer directly */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02226
02227
02228
02229 static VALUE
02230 main_to_s(VALUE obj)
02231 {
02232 return rb_str_new2("main");
02233 }
02234
02235 VALUE
02236 rb_vm_top_self(void)
02237 {
02238 return GET_VM()->top_self;
02239 }
02240
02241 void
02242 Init_top_self(void)
02243 {
02244 rb_vm_t *vm = GET_VM();
02245
02246 vm->top_self = rb_obj_alloc(rb_cObject);
02247 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
02248
02249
02250 vm->mark_object_ary = rb_ary_tmp_new(1);
02251 }
02252
02253 VALUE *
02254 ruby_vm_verbose_ptr(rb_vm_t *vm)
02255 {
02256 return &vm->verbose;
02257 }
02258
02259 VALUE *
02260 ruby_vm_debug_ptr(rb_vm_t *vm)
02261 {
02262 return &vm->debug;
02263 }
02264
02265 VALUE *
02266 rb_ruby_verbose_ptr(void)
02267 {
02268 return ruby_vm_verbose_ptr(GET_VM());
02269 }
02270
02271 VALUE *
02272 rb_ruby_debug_ptr(void)
02273 {
02274 return ruby_vm_debug_ptr(GET_VM());
02275 }
02276