Attachment 'gc_interface.txt'

/**
 * @file
 * GC interface exposed to VM.
 *
 * In order to eliminate dependencies on certain types, such as (VTable *),
 * they have been removed from this interface and replaced with (void *).
 * While this might appear unfortunate, it keeps the GC free of any
 * knowledge of the class and VTable structures that is not of interest
 * to the GC.
 */


/*
 * *****
 * *
 * *  Routines to support the initialization and termination of GC.
 * *
 * *****
 */

/**
 * Called by the VM to start the GC initialization sequence.
 *
 * This function is expected to initialize the GC internal data structures.
 * The VM should call this *before* any other calls to this interface.
 * The GC assumes that the VM is ready to support a GC if it
 * calls this function.
 */
GCExport void gc_init();

/**
 * May be called at various points the VM decides are GC-safe.
 * The GC may ignore this, or it may force a root set enumeration, or it may
 * execute a full GC.
 *
 * @note Optional debug interface.
 */
GCExport void gc_test_safepoint();




/**
 * If the GC supports a "bump-the-pointer" style of allocation, where the GC's
 * thread-local information contains a "current" pointer and a "limit" pointer,
 * then it should return TRUE, and it should set *offset_of_current to the
 * offset into the GC thread block of the "current" pointer, and similarly for
 * *offset_of_limit and the "limit" pointer. If not, it should return
 * FALSE.
 */
GCExport Boolean gc_supports_frontier_allocation(unsigned *offset_of_current, unsigned *offset_of_limit);
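
/*
 * Illustrative sketch, not part of the interface: one way a VM or JIT might
 * use the frontier-allocation offsets to inline a bump-the-pointer fast path.
 * Here thread_pointer stands for the GC's thread-local space (the same value
 * passed to gc_alloc_fast() below), and size is the requested object size;
 * the slow-path call, vtable store and any required zeroing are elided.
 *
 *   unsigned cur_off, lim_off;
 *   if (gc_supports_frontier_allocation(&cur_off, &lim_off)) {
 *       char **p_current = (char **)((char *)thread_pointer + cur_off);
 *       char **p_limit   = (char **)((char *)thread_pointer + lim_off);
 *       if (*p_current + size <= *p_limit) {
 *           void *obj = (void *)*p_current;   // new object starts at the frontier
 *           *p_current += size;               // bump the "current" pointer
 *       }
 *   }
 */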




/**
 * This API is used by the VM to notify the GC that the
 * VM has completed bootstrapping and initialization, and
 * is henceforth ready to field requests for enumerating
 * live references.
 *
 * Prior to this function being called, the GC might see some
 * strange sights, such as NULL or incomplete vtables. The GC will
 * need to consider these as normal and work with the VM to ensure
 * that bootstrapping works. This means that the GC will make few
 * demands on the VM prior to this routine being called.
 *
 * However, once this routine has been called, the GC is free to do
 * stop-the-world collections and will assume that the entire
 * gc_import.h interface is available and fully functional.
 *
 * If this routine is called twice, the result is undefined.
 */
GCExport void gc_vm_initialized();
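
/*
 * Illustrative sketch, not part of the interface: the expected startup
 * ordering described above. Only calls declared in this file are shown; the
 * VM bootstrapping work in between is elided.
 *
 *   gc_init();              // first call into this interface
 *   // ... VM bootstraps classes, vtables and the initial threads ...
 *   gc_vm_initialized();    // called at most once; after this the GC may
 *                           // stop the world and rely on all of gc_import.h
 */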



/**
 * This is called once the VM has no use for the heap or the
 * garbage collector data structures. The assumption is that the
 * VM is exiting but needs to give the GC time to run destructors
 * and free up memory it has obtained from the OS.
 * After this routine has been called, the VM cannot rely on any
 * data structures created by the GC.
 *
 * Errors: If gc_enumerate_finalizable_objects has been called and,
 *         during gc_wrapup, the GC discovers an object whose
 *         finalizer has not been run, it will attempt to report an error.
 */
GCExport void gc_wrapup();



/**
 * Called by the VM to enumerate a root reference.
 */
GCExport void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);

/**
 * Resembles gc_add_root_set_entry() but is passed the address of a slot
 * containing a compressed reference.
 */
GCExport void gc_add_compressed_root_set_entry(uint32 *ref, Boolean is_pinned);

/**
 * Called by the VM to enumerate a weak root reference.
 *
 * @param slot A pointer to the slot containing the weak root
 * @param is_pinned TRUE denotes that the object pointed to by this slot
 *        should not be moved during garbage collection.
 * @param is_short_weak TRUE means that the weak root must be cleared
 *        before the object becomes eligible for finalization.
 */
GCExport void gc_add_weak_root_set_entry(Managed_Object_Handle *slot,
    Boolean is_pinned, Boolean is_short_weak);
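
/*
 * Illustrative sketch, not part of the interface: during a root set
 * enumeration the VM reports each root slot it owns. strong_slot and
 * weak_slot are hypothetical VM-owned slots holding references.
 *
 *   gc_add_root_set_entry(strong_slot, FALSE);            // movable strong root
 *   gc_add_weak_root_set_entry(weak_slot, FALSE, TRUE);   // short weak root: cleared
 *                                                         // before finalization
 */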

/*
 * *****
 * *
 * *  Routines to support the allocation and initialization of objects.
 * *
 * *****
 */


/**
 * This routine is the primary routine used to allocate objects.
 * It assumes nothing about the state of the VM internal data
 * structures or the runtime stack. If gc_alloc_fast is able
 * to allocate the object without invoking a GC or calling the VM,
 * then it does so. It places p_vtable into the object, ensures
 * that the object is zeroed and then returns a ManagedObject
 * pointer to the object. If it is not able to allocate the object
 * without invoking a GC, then it returns NULL.
 *
 * This is like gc_malloc_or_null, except that it passes a pointer to
 * the thread's GC-specific space as a third argument. This prevents
 * the GC from having to immediately call vm_get_thread_curr_alloc_block()
 * as its first task.
 *
 * @param size - the size of the object to allocate. If the high bit is
 *               set, then various constraints as described above are
 *               placed on the allocation of this object.
 * @param type - a pointer to the vtable of the class being
 *               allocated. This routine will place this value
 *               in the appropriate slot of the new object.
 * @param thread_pointer - a pointer to the GC's thread-local space.
 *
 * @note rename of gc_malloc_or_null_with_thread_pointer()
 */
GCExport Managed_Object_Handle gc_alloc_fast(unsigned size,
                                             Allocation_Handle type,
                                             void *thread_pointer);

/**
 * This routine is used to allocate an object. See the above
 * discussion on the overloading of size. {link allocation}
 *
 * @param size - the size of the object to allocate. If the high bit is
 *               set, then various constraints as described above are
 *               placed on the allocation of this object.
 * @param type - a pointer to the vtable of the class being allocated.
 *               This routine will place this value in the
 *               appropriate slot of the new object.
 * @param thread_pointer - a pointer to the GC's thread-local space.
 *
 * @note rename of gc_malloc_with_thread_pointer()
 */
GCExport Managed_Object_Handle gc_alloc(unsigned size,
                                        Allocation_Handle type,
                                        void *thread_pointer);
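
/*
 * Illustrative sketch, not part of the interface: the typical allocation
 * sequence, trying the non-blocking fast path first and falling back to the
 * slow path, which may trigger a collection and call back into the VM.
 * size, type and thread_pointer are as described in the parameter lists above.
 *
 *   Managed_Object_Handle obj = gc_alloc_fast(size, type, thread_pointer);
 *   if (obj == NULL) {
 *       obj = gc_alloc(size, type, thread_pointer);   // may invoke a GC
 *   }
 */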


/**
 * For bootstrapping situations, when we still don't have
 * a class for the object. This routine is only available prior to
 * the call to gc_vm_initialized. If it is called after
 * gc_vm_initialized has been called, the results are undefined.
 * The GC places NULL in the vtable slot of the newly allocated
 * object.
 *
 * The object allocated will be pinned, not finalizable and not an array.
 *
 * @param size - the size of the object to allocate. The high bit
 *               will never be set on this argument.
 * @return The newly allocated object
 *
 * @note Will be renamed to gc_alloc_pinned_noclass() to comply with
 *       accepted naming conventions.
 */
GCExport Managed_Object_Handle gc_pinned_malloc_noclass(unsigned size);


/*
 * *****
 * *
 * *  Routines to support write barriers.
 * *
 * *****
 */

/**
 * Returns TRUE if the GC requires write barriers before every store to
 * a field of a reference type.
 */
GCExport Boolean gc_requires_barriers();



/*
 * *****
 * *
 * *  Routines to support threads.
 * *
 * *****
 */

/**
 * This routine is called during thread startup to set
 * an initial nursery for the thread.
 *
 * @note gc_thread_init and gc_thread_kill assume that
 *       the current thread is the one we are interested in.
 *       If the thread were passed in explicitly, threads could be
 *       cross-initialized and cross-killed.
 */
GCExport void gc_thread_init(void *gc_information);



/**
 * This is called just before the thread is reclaimed.
 */
GCExport void gc_thread_kill(void *gc_information);
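
/*
 * Illustrative sketch, not part of the interface: the per-thread lifecycle
 * from the GC's point of view. Both calls act on the current thread;
 * gc_information is the thread's GC-private area provided by the VM.
 *
 *   gc_thread_init(gc_information);   // at thread startup: sets up the nursery
 *   // ... thread runs, allocating via gc_alloc_fast()/gc_alloc() ...
 *   gc_thread_kill(gc_information);   // just before the thread is reclaimed
 */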

/**
 * Opaque handle for threads.
 */
typedef void* Thread_Handle;


/*
 * *****
 * *
 * *  Routines to support the functionality required by the Java language specification.
 * *
 * *****
 */

/**
 * API for the VM to force a GC, typically in response to a call to
 * java.lang.Runtime.gc
 */
GCExport void gc_force_gc();



/**
 * API for the VM to determine the total size of the GC heap, typically in
 * response to a call to java.lang.Runtime.totalMemory
 */
GCExport int64 gc_total_memory();



/**
 * API for the VM to get an approximate view of the free space,
 * typically in response to a call to java.lang.Runtime.freeMemory
 */
GCExport int64 gc_free_memory();
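
/*
 * Illustrative sketch, not part of the interface: how a VM might back the
 * java.lang.Runtime queries mentioned above. Runtime_totalMemory and
 * Runtime_freeMemory are hypothetical VM-side natives.
 *
 *   int64 Runtime_totalMemory() { return gc_total_memory(); }
 *   int64 Runtime_freeMemory()  { return gc_free_memory();  }
 */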


/**
 * Returns TRUE if the object is pinned.
 * Routine to support the functionality required by JNI to see if an object is pinned.
 */
GCExport Boolean gc_is_object_pinned (Managed_Object_Handle obj);


/*
 * *****
 * *
 * *  Routines to handle the GC area in the VTable.
 * *
 * *****
 */

/**
 * The VM calls this function after a new class has been prepared.
 * The GC can use a call interface to gather relevant information about
 * that class and store it in the area of the VTable that is reserved for the GC.
 * The information cached in the VTable should be used by the GC in
 * performance-sensitive functions like object scanning.
 */
GCExport void gc_class_prepared(Class_Handle ch, VTable_Handle vth);
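
/*
 * Illustrative sketch, not part of the interface: on gc_class_prepared() the
 * GC might query the class through the VM's class interface and cache what it
 * needs for scanning in the VTable area reserved for the GC. GC_VTable_Info,
 * vtable_get_gc_private_area(), class_is_array() and
 * class_get_reference_offsets() are hypothetical stand-ins for whatever the
 * GC and VM actually provide.
 *
 *   void gc_class_prepared(Class_Handle ch, VTable_Handle vth) {
 *       GC_VTable_Info *gcvt = (GC_VTable_Info *)vtable_get_gc_private_area(vth);
 *       gcvt->is_array    = class_is_array(ch);
 *       gcvt->ref_offsets = class_get_reference_offsets(ch);  // used by object scanning
 *   }
 */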


/**
 * Granularity of object alignment.
 *
 * Objects are aligned on 4 or 8 bytes. If they are aligned on 8 bytes, then
 * arrays will be required to start on the indicated alignment. This means that
 * for 8-byte alignment on IA32 the header will look like this:
 *
 * uint32 gc_header_lock_hash
 * VTable *vt
 * uint32 array_length
 * uint32 padding
 * the array elements.
 */
#ifdef POINTER64
#define GC_OBJECT_ALIGNMENT 8
#else
#define GC_OBJECT_ALIGNMENT 4
#endif
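
/*
 * Illustrative sketch, not part of the interface: the 8-byte-aligned array
 * header described above, written out as a struct for an IA32 (32-bit) build.
 * The type and field names are illustrative only.
 *
 *   typedef struct {
 *       uint32  gc_header_lock_hash;   // lock/hash word
 *       VTable *vt;                    // 4-byte vtable pointer on IA32
 *       uint32  array_length;          // number of elements
 *       uint32  padding;               // pads the header to 16 bytes so the
 *                                      // elements start on an 8-byte boundary
 *       // the array elements follow
 *   } Array_Header_IA32;
 */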


/*
 * *****
 * *
 * *  Routines to support various write barriers.
 * *
 * *****
 */


/**
 * Returns TRUE if references within objects and vector elements are to be
 * treated as offsets rather than raw pointers.
 */
GCExport Boolean gc_supports_compressed_references();

/**
 * These interfaces are marked for replacement on IPF by the following
 * gc_heap_write_* interfaces.
 *
 * @deprecated Will be removed soon.
 */
GCExport void gc_write_barrier(Managed_Object_Handle p_base_of_obj_with_slot);

/**
 * There are two flavors for historical reasons. The compiler for IA32 will
 * produce code for the version using an offset.
 *
 * @deprecated Will be removed soon.
 */
GCExport void gc_heap_wrote_object (Managed_Object_Handle p_base_of_object_just_written);


/**
 * By calling this function, the VM notifies the GC that a heap reference
 * was written to a global slot.
 *
 * There are some global slots that are shared by different threads. Sapphire
 * needs to know about writes to these slots. One example of such slots is in
 * the string pools used by the class loader.
 */
GCExport void gc_heap_write_global_slot(Managed_Object_Handle *p_slot,
                                        Managed_Object_Handle value);

/**
 * The VM should call this function on heap reference writes to global slots.
 *
 * The "compressed" versions of the functions support updates to slots containing
 * compressed references that are heap offsets; these functions handle details of
 * converting raw reference pointers to compressed references before updating slots.
 */
GCExport void gc_heap_write_global_slot_compressed(uint32 *p_slot,
                                                   Managed_Object_Handle value);

/**
 * The VM should call this function on heap reference writes to heap objects.
 */
GCExport void gc_heap_write_ref (Managed_Object_Handle p_base_of_object_with_slot,
                                 unsigned offset,
                                 Managed_Object_Handle value);

/**
 * @copydoc gc_heap_write_ref()
 */
GCExport void gc_heap_slot_write_ref (Managed_Object_Handle p_base_of_object_with_slot,
                                      Managed_Object_Handle *p_slot,
                                      Managed_Object_Handle value);

/**
 * @copydoc gc_heap_write_ref()
 */
GCExport void gc_heap_slot_write_ref_compressed (Managed_Object_Handle p_base_of_object_with_slot,
                                                 uint32 *p_slot,
                                                 Managed_Object_Handle value);
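
/*
 * Illustrative sketch, not part of the interface: a reference store performed
 * through the write-barrier interface instead of a raw store. p_obj, offset
 * and value stand for the enclosing object, the byte offset of the slot
 * within it, and the reference being stored.
 *
 *   Managed_Object_Handle *p_slot =
 *       (Managed_Object_Handle *)((char *)p_obj + offset);
 *   gc_heap_slot_write_ref(p_obj, p_slot, value);   // GC observes the store and
 *                                                   // applies its barrier logic
 */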



/**
 * Pin object.
 */
GCExport void gc_pin_object (Managed_Object_Handle* p_object);

/**
 * Unpin object.
 */
GCExport void gc_unpin_object (Managed_Object_Handle* p_object);
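
/*
 * Illustrative sketch, not part of the interface: bracketing direct access to
 * an object's memory with pin/unpin, for example around a JNI
 * Get/ReleasePrimitiveArrayCritical-style sequence. p_object is the address
 * of a slot holding the reference.
 *
 *   gc_pin_object(p_object);      // the object will not be moved while pinned
 *   // ... native code reads or writes the object's memory directly ...
 *   gc_unpin_object(p_object);    // the object may be moved again
 */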



/*
 * *****
 * *
 * *  Routines to support soft, weak, and phantom reference objects.
 * *
 * *****
 */

/**
 * @return the base address of the heap.
 *
 * API for the VM to determine the starting and ending addresses of the heap.
 */
GCExport void *gc_heap_base_address();

/**
 * @return the top address of the heap.
 */
GCExport void *gc_heap_ceiling_address();
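
/*
 * Illustrative sketch, not part of the interface: the VM can use the heap
 * range for cheap "is this a heap pointer?" checks, and as the base for the
 * offset-based references mentioned under gc_supports_compressed_references().
 * p and ref are hypothetical raw pointers.
 *
 *   char *base    = (char *)gc_heap_base_address();
 *   char *ceiling = (char *)gc_heap_ceiling_address();
 *   Boolean in_heap   = ((char *)p >= base && (char *)p < ceiling);
 *   uint32 compressed = (uint32)((char *)ref - base);   // reference as heap offset
 *   void  *raw        = (void *)(base + compressed);    // back to a raw pointer
 */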

GCExport void  gc_register_delinquent_regions(void **, int);
GCExport void *gc_get_latest_path_information();
GCExport Boolean gc_is_heap_object(void *p_obj);
GCExport Boolean gc_is_object_long_lived(void *p_obj);
