//优化缓存查找传入的参数cache 用于表示是否找到cache的布尔量, //从_class_lookupMethodAndLoadCache3进来的是在缓存中已经找过并且没找到的情景,这时候cache为NO if (cache) { //如果传入的是YES,那么就会调用cache_getImp方法去找到缓存里面的IMP,注意cache_getImp是通过汇编实现的,cache_getImp会把找到的IMP放在r11中 imp = cache_getImp(cls, sel); if (imp) return imp; }
//锁住读写锁 runtimeLock.lock(); //....... //如果已经初始化会在objc_class对应的标识位设置为true if (!cls->isRealized()) { //实例化类结构 realizeClass(cls); }
// +initialize就是在这个阶段调用的 if (initialize && !cls->isInitialized()) { runtimeLock.unlock(); //_class_initialize是类初始化的过程。它会发送一个initialize消息给当前类 _class_initialize (_class_getNonMetaClass(cls, inst)); runtimeLock.lock(); //如果我们发送的sel就是initialize那么这里的_class_initialize会发送一次+initialize,后续还会发送一次+initialize,但是这种情况很少见 // If sel == initialize, _class_initialize will send +initialize and // then the messenger will send +initialize again after this // procedure finishes. Of course, if this is not being called // from the messenger then it won't happen. 2778172 }
retry: runtimeLock.assertLocked();
// 从缓存中查找方法实现 imp = cache_getImp(cls, sel); if (imp) goto done;
//尝试在本类的方法列表中查找 { Method meth = getMethodNoSuper_nolock(cls, sel); if (meth) { //找到的情况下添加到缓存并返回方法实现 log_and_fill_cache(cls, meth->imp, sel, inst, cls); imp = meth->imp; goto done; } } //尝试在父类的缓存和方法列表中查找 { //如果以上尝试都失败了,接下来就会循环尝试父类的缓存和方法列表。一直找到NSObject为止。因为NSObject的superclass为nil,才跳出循环。 unsigned attempts = unreasonableClassCount(); for (Class curClass = cls->superclass; curClass != nil; curClass = curClass->superclass) { // Halt if there is a cycle in the superclass chain. if (--attempts == 0) { _objc_fatal("Memory corruption in class list."); } // Superclass cache. imp = cache_getImp(curClass, sel); if (imp) { if (imp != (IMP)_objc_msgForward_impcache) { // Found the method in a superclass. Cache it in this class. log_and_fill_cache(cls, imp, sel, inst, curClass); goto done; } else { // Found a forward:: entry in a superclass. // Stop searching, but don't cache yet; call method // resolver for this class first. break; } } // Superclass method list. Method meth = getMethodNoSuper_nolock(curClass, sel); if (meth) { log_and_fill_cache(cls, meth->imp, sel, inst, curClass); imp = meth->imp; goto done; } } } //没有找到实现方法,尝试寻找方法的解决者 // No implementation found. Try method resolver once. if (resolver && !triedResolver) { runtimeLock.unlock(); //如果父类找到NSObject还没有找到,那么就会开始尝试_class_resolveMethod方法。 //注意,这些需要打开读锁,因为开发者可能会在这里动态增加方法实现,所以不需要缓存结果。 //此处虽然锁被打开,可能会出现线程问题,所以在执行完_class_resolveMethod方法之后,会goto retry,重新执行一遍之前查找的过程。 // try [nonMetaClass resolveClassMethod:sel] // and [cls resolveInstanceMethod:sel] _class_resolveMethod(cls, sel, inst); runtimeLock.lock(); //寻找用户指定的方法的解决者 // Don't cache the result; we don't hold the lock so it may have // changed already. Re-do the search from scratch instead. triedResolver = YES; //重新查找 goto retry; }
// dyld image states, in the order dyld advances an image through them:
// mapped -> dependents_mapped -> rebased -> bound ->
// dependents_initialized -> initialized -> terminated.
// (Extraction had fused "enum" into the tag name and the '=' spacing;
// restored to valid C.)
enum dyld_image_states {
    dyld_image_state_mapped                 = 10,  // No batch notification for this
    dyld_image_state_dependents_mapped      = 20,  // Only batch notification for this
    dyld_image_state_rebased                = 30,
    dyld_image_state_bound                  = 40,
    dyld_image_state_dependents_initialized = 45,  // Only single notification for this
    dyld_image_state_initialized            = 50,
    dyld_image_state_terminated             = 60   // Only single notification for this
};
//
// Note: only for use by the objc runtime
// Register handlers to be called when objc images are mapped, unmapped, and initialized.
// Dyld will call back the "mapped" function with an array of images that contain an objc-image-info section.
// Those images that are dylibs will have the ref-counts automatically bumped, so objc will no longer need to
// call dlopen() on them to keep them from being unloaded. During the call to _dyld_objc_notify_register(),
// dyld will call the "mapped" function with already loaded objc images. During any later dlopen() call,
// dyld will also call the "mapped" function. Dyld will call the "init" function when dyld would be running
// initializers in that image. This is when objc calls any +load methods in that image.
// 通过这个方法可以向dyld注册用于处理镜像完成映射,取消映射和初始化之后的处理方法。
void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClasses) { //......
// =================================查找classes================================= // Discover classes. Fix up unresolved future classes. Mark bundle classes. for (EACH_HEADER) { //从Mach-O的 __DATA区 __objc_classlist 获取所有类,并加入gdb_objc_realized_classes list中 classref_t *classlist = _getObjc2ClassList(hi, &count); //....... for (i = 0; i < count; i++) { Class cls = (Class)classlist[i]; Class newCls = readClass(cls, headerIsBundle, headerIsPreoptimized);
if (newCls != cls && newCls) { // 类被移动了但是没有被删除的情况 // Class was moved but not deleted. Currently this occurs // only when the new class resolved a future class. // Non-lazily realize the class below. resolvedFutureClasses = (Class *)realloc(resolvedFutureClasses, (resolvedFutureClassCount+1) * sizeof(Class)); resolvedFutureClasses[resolvedFutureClassCount++] = newCls; } } }
ts.log("IMAGE TIMES: discover classes");
// 修复重新映射的类 // Fix up remapped classes // Class list and nonlazy class list remain unremapped. // Class refs and super refs are remapped for message dispatching. if (!noClassesRemapped()) { for (EACH_HEADER) { Class *classrefs = _getObjc2ClassRefs(hi, &count); for (i = 0; i < count; i++) { remapClassRef(&classrefs[i]); } // fixme why doesn't test future1 catch the absence of this? classrefs = _getObjc2SuperRefs(hi, &count); for (i = 0; i < count; i++) { remapClassRef(&classrefs[i]); } } }
// 重新映射类 ts.log("IMAGE TIMES: remap classes");
// Fix up @selector references static size_t UnfixedSelectors; { mutex_locker_t lock(selLock); for (EACH_HEADER) { if (hi->isPreoptimized()) continue; bool isBundle = hi->isBundle(); SEL *sels = _getObjc2SelectorRefs(hi, &count); UnfixedSelectors += count; for (i = 0; i < count; i++) { const char *name = sel_cname(sels[i]); // 3. 注册Sel,并存储到全局变量namedSelectors的list中 sels[i] = sel_registerNameNoLock(name, isBundle); } } }
ts.log("IMAGE TIMES: fix up selector references");
//.....
// Discover protocols. Fix up protocol refs. for (EACH_HEADER) { extern objc_class OBJC_CLASS_$_Protocol; Class cls = (Class)&OBJC_CLASS_$_Protocol; assert(cls); NXMapTable *protocol_map = protocols(); bool isPreoptimized = hi->isPreoptimized(); bool isBundle = hi->isBundle(); //找到所有Protocol并处理引用 protocol_t **protolist = _getObjc2ProtocolList(hi, &count); for (i = 0; i < count; i++) { readProtocol(protolist[i], cls, protocol_map, isPreoptimized, isBundle); } }
ts.log("IMAGE TIMES: discover protocols");
// Fix up @protocol references // Preoptimized images may have the right // answer already but we don't know for sure. for (EACH_HEADER) { protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count); for (i = 0; i < count; i++) { remapProtocolRef(&protolist[i]); } }
ts.log("IMAGE TIMES: fix up @protocol references");
// Realize non-lazy classes (for +load methods and static instances) for (EACH_HEADER) { classref_t *classlist = _getObjc2NonlazyClassList(hi, &count); for (i = 0; i < count; i++) { Class cls = remapClass(classlist[i]); if (!cls) continue; addClassTableEntry(cls); realizeClass(cls); } }
ts.log("IMAGE TIMES: realize non-lazy classes");
// Realize newly-resolved future classes, in case CF manipulates them if (resolvedFutureClasses) { for (i = 0; i < resolvedFutureClassCount; i++) { realizeClass(resolvedFutureClasses[i]); resolvedFutureClasses[i]->setInstancesRequireRawIsa(false/*inherited*/); } free(resolvedFutureClasses); }
for (i = 0; i < count; i++) { category_t *cat = catlist[i]; //获取到所属的类 Class cls = remapClass(cat->cls);
if (!cls) { // Category's target class is missing (probably weak-linked). // Disavow any knowledge of this category. catlist[i] = nil; if (PrintConnecting) { _objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with " "missing weak-linked target class", cat->name, cat); } continue; }
// Process this category. // First, register the category with its target class. // Then, rebuild the class's method lists (etc) if // the class is realized. bool classExists = NO; //如果有实例方法,协议或者实例属性 if (cat->instanceMethods || cat->protocols || cat->instanceProperties) { addUnattachedCategoryForClass(cat, cls, hi); if (cls->isRealized()) { //将分类添到属性,方法,协议添加到对应的类中 remethodizeClass(cls); classExists = YES; } if (PrintConnecting) { _objc_inform("CLASS: found category -%s(%s) %s", cls->nameForLogging(), cat->name, classExists ? "on existing class" : ""); } } //如果有类方法,协议或者类属性 if (cat->classMethods || cat->protocols || (hasClassProperties && cat->_classProperties)) { addUnattachedCategoryForClass(cat, cls->ISA(), hi); if (cls->ISA()->isRealized()) { remethodizeClass(cls->ISA()); } if (PrintConnecting) { _objc_inform("CLASS: found category +%s(%s)", cls->nameForLogging(), cat->name); } } } }
ts.log("IMAGE TIMES: discover categories");
// Category discovery MUST BE LAST to avoid potential races // when other threads call the new category code before // this thread finishes its fixups.
// +load handled by prepare_load_methods()
if (DebugNonFragileIvars) { realizeAllClasses(); } //...... }
isMeta = ro->flags & RO_META; //判断是否是元类 rw->version = isMeta ? 7 : 0; // old runtime went up to 6 //版本信息,旧版本的版本信息为6
//..... // Realize superclass and metaclass, if they aren't already. // This needs to be done after RW_REALIZED is set above, for root classes. // This needs to be done after class index is chosen, for root metaclasses. // 【✨】为supercls,metacls 分配空间 supercls = realizeClass(remapClass(cls->superclass)); metacls = realizeClass(remapClass(cls->ISA()));
//......
// 【✨】 Update superclass and metaclass in case of remapping cls->superclass = supercls; //将supercls赋给 cls->superclass //将上面分配的metacls赋给cls cls->initClassIsa(metacls);
// 调整实例变量的偏移和布局,这个将会重新分配class_ro_t // Reconcile instance variable offsets / layout. // This may reallocate class_ro_t, updating our ro variable. if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls, ro);
//设置对象尺寸 // Set fastInstanceSize if it wasn't set already. cls->setInstanceSize(ro->instanceSize);
//【✨】从ro中拷贝部分标志位到rw 字段 // Copy some flags from ro to rw if (ro->flags & RO_HAS_CXX_STRUCTORS) { cls->setHasCxxDtor(); if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) { cls->setHasCxxCtor(); } }
// 【✨】 Connect this class to its superclass's subclass lists // 将当前class与父类相关连 if (supercls) { //将当前类作为supercls的子类添加到父类的子类列表 addSubclass(supercls, cls); } else { //将当前类作为根类 addRootClass(cls); } //【✨】实例化类结构 // Attach categories // 使得类有条理 methodizeClass(cls);
// Root classes get bonus method implementations if they don't have // them already. These apply before category replacements. if (cls->isRootMetaclass()) { // root metaclass addMethod(cls, SEL_initialize, (IMP)&objc_noop_imp, "", NO); }
/***********************************************************************
* call_load_methods
* Call all pending class and category +load methods.
* Class +load methods are called superclass-first.
* Category +load methods are not called until after the parent class's +load.
*
* This method must be RE-ENTRANT, because a +load could trigger
* more image mapping. In addition, the superclass-first ordering
* must be preserved in the face of re-entrant calls. Therefore,
* only the OUTERMOST call of this function will do anything, and
* that call will handle all loadable classes, even those generated
* while it was running.
*
* The sequence below preserves +load ordering in the face of
* image loading during a +load, and make sure that no
* +load method is forgotten because it was added during
* a +load call.
* Sequence:
* 1. Repeatedly call class +loads until there aren't any more
* 2. Call category +loads ONCE.
* 3. Run more +loads if:
*    (a) there are more classes to load, OR
*    (b) there are some potential category +loads that have
*        still never been attempted.
* Category +loads are only run once to ensure "parent class first"
* ordering, even if a category +load triggers a new loadable class
* and a new loadable category attached to that class.
*
* Locking: loadMethodLock must be held by the caller
*   All other locks must not be held.
**********************************************************************/
void call_load_methods(void)
{
    // YES while the outermost invocation is running; makes re-entrant
    // calls (triggered by image loading inside a +load) return immediately.
    static bool loading = NO;
    bool more_categories;

    loadMethodLock.assertLocked();

    // Re-entrant calls do nothing; the outermost call will finish the job.
    if (loading) return;
    loading = YES;

    // Wrap all +load calls in a fresh autorelease pool.
    void *pool = objc_autoreleasePoolPush();

    do {
        // 1. Repeatedly call class +loads until there aren't any more
        while (loadable_classes_used > 0) {
            // Invoke the +load method of every currently loadable class.
            call_class_loads();
        }

        // 2. Call category +loads ONCE per iteration; returns YES if there
        //    are category +loads that have still never been attempted.
        more_categories = call_category_loads();

        // 3. Run more +loads if there are classes OR more untried categories
    } while (loadable_classes_used > 0  ||  more_categories);

    objc_autoreleasePoolPop(pool);

    loading = NO;
}
// Invoke +load on every class currently in the loadable_classes list
// (the list is built during the prepare phase, see prepare_load_methods()).
//
// The global list is detached first so that +load implementations which
// trigger more image loading — and thus append to loadable_classes —
// do not invalidate this iteration; call_load_methods() keeps calling us
// until the global list stays empty.
//
// Fixes vs. the quoted excerpt: de-fused "staticvoid…" / "structloadable_class"
// tokens and restored the missing declaration of loop index `i`.
static void call_class_loads(void)
{
    int i;

    // Detach current loadable list.
    struct loadable_class *classes = loadable_classes;  /* built in the prepare phase */
    int used = loadable_classes_used;
    loadable_classes = nil;
    loadable_classes_allocated = 0;
    loadable_classes_used = 0;

    // Call all +loads for the detached list.
    for (i = 0; i < used; i++) {
        Class cls = classes[i].cls;
        // The recorded IMP is called directly, not via objc_msgSend.
        load_method_t load_method = (load_method_t)classes[i].method;
        if (!cls) continue;

        if (PrintLoading) {
            _objc_inform("LOAD: +[%s load]\n", cls->nameForLogging());
        }
        // Invoke the +load method.
        (*load_method)(cls, SEL_load);
    }

    // Destroy the detached list.
    if (classes) free(classes);
}
// Invoke +load on every category currently in the loadable_categories list
// (the list is built during the prepare phase), then merge any categories
// that became loadable meanwhile back into the global list.
//
// NOTE(review): this is a truncated excerpt of objc4's call_category_loads().
// The body of the first for-loop (which actually invokes each category's
// +load and nils out completed entries) and the trailing
// "return new_categories_added;" appear to have been lost from this
// quotation — confirm against the objc4 source (objc-loadmethod.mm).
static bool call_category_loads(void)
{
    int i, shift;
    bool new_categories_added = NO;

    // Detach current loadable list (data built in the prepare phase).
    struct loadable_category *cats = loadable_categories;
    int used = loadable_categories_used;
    int allocated = loadable_categories_allocated;
    loadable_categories = nil;
    loadable_categories_allocated = 0;
    loadable_categories_used = 0;

    // Call all +loads for the detached list.
    for (i = 0; i < used; i++) {
        Category cat = cats[i].cat;
        load_method_t load_method = (load_method_t)cats[i].method;
        Class cls;
        if (!cat) continue;

    // (excerpt gap — see NOTE(review) above)

    // Compact detached list (order-preserving): drop entries that were
    // nil'ed out because their +load already ran.
    shift = 0;
    for (i = 0; i < used; i++) {
        if (cats[i].cat) {
            cats[i-shift] = cats[i];
        } else {
            shift++;
        }
    }
    used -= shift;

    // Copy any new +load candidates from the new list to the detached list,
    // growing the detached list's allocation as needed.
    new_categories_added = (loadable_categories_used > 0);
    for (i = 0; i < loadable_categories_used; i++) {
        if (used == allocated) {
            allocated = allocated*2 + 16;
            cats = (struct loadable_category *)
                realloc(cats, allocated * sizeof(struct loadable_category));
        }
        cats[used++] = loadable_categories[i];
    }

    // Destroy the new list.
    if (loadable_categories) free(loadable_categories);

    // Reattach the (now augmented) detached list.
    // But if there's nothing left to load, destroy the list.
    if (used) {
        loadable_categories = cats;
        loadable_categories_used = used;
        loadable_categories_allocated = allocated;
    } else {
        if (cats) free(cats);
        loadable_categories = nil;
        loadable_categories_used = 0;
        loadable_categories_allocated = 0;
    }

    if (PrintLoading) {
        if (loadable_categories_used != 0) {
            _objc_inform("LOAD: %d categories still waiting for +load\n",
                         loadable_categories_used);
        }
    }
我们知道编程语言有静态语言和动态语言之分,静态语言在编译的时候就已经明确了每行代码最终执行哪些代码。Objective C 作为动态语言,它的底层是由编译器和Runtime构成:编译时期只是决定向某个对象发送某个消息,但是最终这个对象怎么处理这个消息取决于这个对象而不是固定的,也就是说在编译之后还可以针对发过来的消息进行一系列处理,最终决定执行哪些代码,这部分工作都交由Runtime处理。在介绍Runtime细节之前我们先看下相关的数据结构,熟悉这些数据结构对理解Runtime来说是必不可少的。本文将以Objc 2.0 中的数据结构为研究对象。
// Objc 2.0 class layout. A class is itself an object: it inherits from
// objc_object, so it carries an inherited isa pointer (per the surrounding
// article, the isa of a class points at its metaclass).
// (Fixed the fused "structobjc_class" token from the extraction.)
struct objc_class : objc_object {
    // Class ISA;              // inherited from objc_object
    Class superclass;          // pointer to the superclass
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags
    //.....
};
我们从它的定义上看,它其实也是一个objc_object,所以objc_class也会有一个isa,这个isa指向的是Meta class。Meta class 本身也是一个objc_class,它与普通实例对象(objc_object)所指向的objc_class的区别在于:它的cache以及bits中存放的是类方法等数据。这些会在后面介绍。
// link any inserted libraries // 链接任何插入的库 // do this after linking main executable so that any dylibs pulled in by inserted // dylibs (e.g. libSystem) will not be in front of dylibs the program uses if ( sInsertedDylibCount > 0 ) { for(unsignedint i=0; i < sInsertedDylibCount; ++i) { ImageLoader* image = sAllImages[i+1]; link(image, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL)); image->setNeverUnloadRecursive(); } // only INSERTED libraries can interpose // register interposing info after all inserted libraries are bound so chaining works for(unsignedint i=0; i < sInsertedDylibCount; ++i) { ImageLoader* image = sAllImages[i+1]; image->registerInterposing(); } }
// <rdar://problem/19315404> dyld should support interposition even without DYLD_INSERT_LIBRARIES for (int i=sInsertedDylibCount+1; i < sAllImages.size(); ++i) { ImageLoader* image = sAllImages[i]; if ( image->inSharedCache() ) continue; image->registerInterposing(); }
// apply interposing to initial set of images for(int i=0; i < sImageRoots.size(); ++i) { sImageRoots[i]->applyInterposing(gLinkContext); } gLinkContext.linkingMainExecutable = false;
sMainExecutable->weakBind(gLinkContext);
initializeMainExecutable();
// 寻找主程序的入口 // find entry point for main executable result = (uintptr_t)sMainExecutable->getThreadPC(); if ( result != 0 ) { // 主可执行文件使用lc_main,需要返回libdyld.dylib中的glue //调用main() //当执行完dyld::_main方法之后,返回了main()函数地址,这个时候所有初始化工作都已经完成了,正式进入Objc声明周期 // main executable uses LC_MAIN, needs to return to glue in libdyld.dylib if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 9) ) *startGlue = (uintptr_t)gLibSystemHelpers->startGlueToCallExit; else halt("libdyld.dylib support not present for LC_MAIN"); } else { // main executable uses LC_UNIXTHREAD, dyld needs to let "start" in program set up for main() result = (uintptr_t)sMainExecutable->getMain(); *startGlue = 0; } } catch(constchar* message) { syncAllImages(); halt(message); } catch(...) { dyld::log("dyld: launch failed\n"); } return result; }
在main函数中会执行如下操作:
将主程序初始化为imageLoader,用于后续的链接等操作
加载共享库到内存
加载插入的动态库
链接主程序,链接插入库
初始化主程序
寻找主程序入口点
接下来我们将按照上面的顺序进行展开:
将主程序初始化为imageLoader
在dyld main方法中会通过instantiateFromLoadedImage创建ImageLoader,其中第一个参数传入的是mainExecutableMH为主程序的Mach O Header,有了mainExecutableMH,dyld就可以从头开始遍历整个Mach O文件信息了。在开始创建ImageLoader之前会先调用isCompatibleMachO来查看mainExecutableMH中的cputype与cpusubtype是否与当前设备兼容,只有兼容的情况下才会继续创建ImageLoader,创建后的ImageLoader会通过addImage添加到sAllImages,然后调用addMappedRange()申请内存,更新主程序镜像映射的内存区。
// Create and register the ImageLoader for the (already kernel-mapped)
// main executable.
//   mh    - mach_header of the main executable
//   slide - slide applied when the image was mapped
//   path  - filesystem path of the executable
// Throws if the header is not a Mach-O format compatible with this machine.
// (Fixed fused parameter tokens "constmacho_header*mh" etc. from the extraction.)
static ImageLoader* instantiateFromLoadedImage(const macho_header* mh, uintptr_t slide, const char* path)
{
    // Verify the mach-o header's cputype/cpusubtype are compatible with the current system.
    if ( isCompatibleMachO((const uint8_t*)mh, path) ) {
        // Instantiate the image loader for the main executable.
        ImageLoader* image = ImageLoaderMachO::instantiateMainExecutable(mh, slide, path, gLinkContext);
        // Record it in the global image list.
        addImage(image);
        return image;
    }

    throw "main executable not a known format";
}
ImageLoader* load(const char* path, const LoadContext& context) { //..... // 尝试所有路径排列并检查现有加载的镜像 // try all path permutations and check against existing loaded images ImageLoader* image = loadPhase0(path, orgPath, context, NULL); if ( image != NULL ) { return image; }
//...... image = loadPhase0(path, orgPath, context, &exceptions); if ( (image == NULL) && cacheablePath(path) && !context.dontLoad ) { //...... if ( (myerr == ENOENT) || (myerr == 0) ) { // see if this image is in shared cache //...... if ( findInSharedCacheImage(resolvedPath, false, NULL, &mhInCache, &pathInCache, &slideInCache) ) { //...... image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext); image = checkandAddImage(image, context); } } } }
static ImageLoader* loadPhase6(intfd, conststructstat& stat_buf, constchar*path, const LoadContext& context) { //....... // try mach-o loader if ( shortPage ) throw "file too short"; if ( isCompatibleMachO(firstPage, path) ) { // 只有MH_BUNDLE,MH_DYLIB,以及一些MH_EXECUTE 才能被动态加载 switch ( ((mach_header*)firstPage)->filetype ) { case MH_EXECUTE: case MH_DYLIB: case MH_BUNDLE: break; default: throw "mach-o, but wrong filetype"; } //实例化镜像 ImageLoader* image = ImageLoaderMachO::instantiateFromFile(path, fd, firstPage, fileOffset, fileLength, stat_buf, gLinkContext); // validate return checkandAddImage(image, context); } //..... }
链接主程序
link(sMainExecutable, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL)); //...... // 4.link any inserted libraries // 链接任何插入的库 // do this after linking main executable so that any dylibs pulled in by inserted // dylibs (e.g. libSystem) will not be in front of dylibs the program uses if ( sInsertedDylibCount > 0 ) { for(unsignedint i=0; i < sInsertedDylibCount; ++i) { ImageLoader* image = sAllImages[i+1]; link(image, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL)); image->setNeverUnloadRecursive(); } //...... } //...... gLinkContext.linkingMainExecutable = false; sMainExecutable->weakBind(gLinkContext);
void ImageLoader::recursiveRebase(const LinkContext& context) { if ( fState < dyld_image_state_rebased ) { // break cycles fState = dyld_image_state_rebased; try { // rebase lower level libraries first for(unsigned int i=0; i < libraryCount(); ++i) { ImageLoader* dependentImage = libImage(i); if ( dependentImage != NULL ) dependentImage->recursiveRebase(context); } // rebase this image doRebase(context); // notify context.notifySingle(dyld_image_state_rebased, this); } //...... } }
在之前的步骤中我们都在完成库的加载,但是这些dylibs之间是没有关联的,需要rebase,binding对地址修正,不知道大家看过高达之类的动画片没有,在变身过程中装备会从四面八方飞过来,然后会一个个安装到身上,嗯,link就是这个过程,动态库中的地址是相对的,因为它需要保证它内部逻辑的独立性,同时为了降低缓冲区溢出攻击的成功率主流的操作系统都会采用ASLR(Address space layout randomization)技术,它通过对堆、栈、共享库映射等线性区布局的随机化来增加攻击者预测目的地址的难度,防止攻击者直接定位攻击代码位置,达到阻止溢出攻击的目的。
// if prebound and loaded at prebound address, and all libraries are same as when this was prebound, then no need to bind // note: flat-namespace binaries need to have imports rebound (even if correctly prebound) if ( this->usablePrebinding(context) ) { // don't need to bind } else { #if TEXT_RELOC_SUPPORT // if there are __TEXT fixups, temporarily make __TEXT writable if ( fTextSegmentBinds ) this->makeTextSegmentWritable(context, true); #endif
// run through all binding opcodes eachBind(context, &ImageLoaderMachOCompressed::bindAt); #if TEXT_RELOC_SUPPORT // if there were __TEXT fixups, restore write protection if ( fTextSegmentBinds ) this->makeTextSegmentWritable(context, false); #endif // if this image is in the shared cache, but depends on something no longer in the shared cache, // there is no way to reset the lazy pointers, so force bind them now if ( forceLazysBound || fInSharedCache ) this->doBindJustLazies(context);
// this image is in cache, but something below it is not. If // this image has lazy pointer to a resolver function, then // the stub may have been altered to point to a shared lazy pointer. if ( fInSharedCache ) this->updateOptimizedLazyPointers(context);
// tell kernel we are done with chunks of LINKEDIT if ( !context.preFetchDisabled ) this->markFreeLINKEDIT(context); } // set up dyld entry points in image // do last so flat main executables will have __dyld or __program_vars set up this->setupLazyPointerHandler(context); CRSetCrashLogMessage2(NULL); }
// 初始化任意插入的dylibs // run initialzers for any inserted dylibs if ( rootCount > 1 ) { //这里需要注意的是sImageRoots 中的第一个变量是主程序镜像, 因此这里初始化的时候需要跳过第一个数据, 对其他后面插入的dylib进行调用ImageLoader::runInitializers进行初始化 for(size_t i=1; i < rootCount; ++i) { sImageRoots[i]->runInitializers(gLinkContext, initializerTimes[0]); } } // 最后运行主程序的初始化器 // run initializers for main executable and everything it brings up sMainExecutable->runInitializers(gLinkContext, initializerTimes[0]);
result = (uintptr_t)sMainExecutable->getThreadPC(); if ( result != 0 ) { // 主可执行文件使用lc_main,需要返回libdyld.dylib中的glue //调用main() //当执行完dyld::_main方法之后,返回了main()函数地址,这个时候所有初始化工作都已经完成了,正式进入Objc声明周期 // main executable uses LC_MAIN, needs to return to glue in libdyld.dylib if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 9) ) *startGlue = (uintptr_t)gLibSystemHelpers->startGlueToCallExit; else halt("libdyld.dylib support not present for LC_MAIN"); } else { // main executable uses LC_UNIXTHREAD, dyld needs to let "start" in program set up for main() result = (uintptr_t)sMainExecutable->getMain(); *startGlue = 0; }
main 地址有两种方式获取:一种是存放在LC_MAIN命令中,这时候需要调用getThreadPC获取;另一种是存放在LC_UNIXTHREAD命令中,这时候需要调用getMain获取(对应上面代码中的两个分支)。
void* ImageLoaderMachO::getThreadPC()const { constuint32_t cmd_count = ((macho_header*)fMachOData)->ncmds; conststructload_command* const cmds = (struct load_command*)&fMachOData[sizeof(macho_header)]; conststructload_command* cmd = cmds; for (uint32_t i = 0; i < cmd_count; ++i) { // 遍历loadCommand,加载loadCommand中的'LC_MAIN'所指向的偏移地址 if ( cmd->cmd == LC_MAIN ) { entry_point_command* mainCmd = (entry_point_command*)cmd; // 偏移量 + header所占的字节数,就是main的入口 void* entry = (void*)(mainCmd->entryoff + (char*)fMachOData); if ( this->containsAddress(entry) ) return entry; else throw"LC_MAIN entryoff is out of range"; } cmd = (conststruct load_command*)(((char*)cmd)+cmd->cmdsize); } returnNULL; }
// Kernel image activator for Mach-O binaries: validates the Mach-O header,
// rejects file types the kernel does not execute directly, and sets up the
// new process address space. (Heavily elided excerpt; elisions marked //....)
static int exec_mach_imgact(struct image_params *imgp)
{
    // ip_vdata holds the first page of the file; interpret it as a Mach-O header.
    struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
    //........
    /*
     * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
     * is a reserved field on the end, so for the most part, we can
     * treat them as if they were identical.
     *
     * Magic check: only 32-bit (MH_MAGIC) and 64-bit (MH_MAGIC_64)
     * Mach-O files are accepted.
     */
    if ((mach_header->magic != MH_MAGIC/*32-bit*/) &&
        (mach_header->magic != MH_MAGIC_64/*64-bit*/)) {
        error = -1;
        goto bad;
    }

    // Reject MH_DYLIB and MH_BUNDLE: the kernel only activates executables;
    // per the article, dylibs and bundles are loaded later by dyld in user space.
    switch (mach_header->filetype) {
    case MH_DYLIB:
    case MH_BUNDLE:
        error = -1;
        goto bad;
    }

    /*
     * NOTE: An error after this point indicates we have potentially
     * destroyed or overwrote some process state while attempting an
     * execve() following a vfork(), which is an unrecoverable condition.
     */

    /*
     * We reset the task to 64-bit (or not) here. It may have picked up
     * a new map, and we need that to reflect its true 64-bit nature.
     */

    /*
     * Set up the system reserved areas in the new address space.
     */
    vm_map_exec(get_task_map(task), task, (void *) p->p_fd->fd_rdir, cpu_type());
    //.......
}
/* * 检查是否是正确的计算机类型 * Check to see if right machine type. */ if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) || !grade_binary(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) return(LOAD_BADARCH);
//主要是用来对Mach-O做检测,会检测Mach-O头部,解析其架构、检查imgp等内容, //并拒绝接受Dylib和Bundle这样的文件,这些文件会由dyld负责加载 switch (header->filetype) { case MH_OBJECT: case MH_EXECUTE: case MH_PRELOAD: //第一次的时候这里会走通 if (depth != 1) { return (LOAD_FAILURE); } break; case MH_FVMLIB: case MH_DYLIB: if (depth == 1) { return (LOAD_FAILURE); } break; case MH_DYLINKER: //第二次的时候这里会走通 if (depth != 2) { return (LOAD_FAILURE); } break; default: return (LOAD_FAILURE); } //..... /* * Round size of Mach-O commands up to page boundry. */ size = round_page(mach_header_sz + header->sizeofcmds); if (size <= 0) return(LOAD_BADMACHO);
/* * 将加载命令映射到内核地址 * Map the load commands into kernel memory. */
//.......
/* * 扫描每个命令,处理每个命令 * Scan through the commands, processing each one as necessary. */ for (pass = 1; pass <= 2; pass++) { /* * Loop through each of the load_commands indicated by the * Mach-O header; if an absurd value is provided, we just * run off the end of the reserved section by incrementing * the offset too far, so we are implicitly fail-safe. */ offset = mach_header_sz; //加载命令数目 ncmds = header->ncmds; while (ncmds--) { /* * 获取指向命令的地址 * Get a pointer to the command. */ lcp = (struct load_command *)(addr + offset); oldoffset = offset; offset += lcp->cmdsize; /* * Perform prevalidation of the struct load_command * before we attempt to use its contents. Invalid * values are ones which result in an overflow, or * which can not possibly be valid commands, or which * straddle or exist past the reserved section at the * start of the image. */ if (oldoffset > offset || lcp->cmdsize < sizeof(struct load_command) || offset > header->sizeofcmds + mach_header_sz) { ret = LOAD_BADMACHO; break; } /* * Act on struct load_command's for which kernel * intervention is required. 
*/ switch(lcp->cmd) { /*加载64位segment*/ case LC_SEGMENT_64: if (pass != 1) break; ret = load_segment_64( (struct segment_command_64 *)lcp, pager, file_offset, macho_size, ubc_getsize(vp), map, result); break; /*加载32位segment*/ case LC_SEGMENT: if (pass != 1) break; ret = load_segment( (struct segment_command *) lcp, pager, file_offset, macho_size, ubc_getsize(vp), map, result); break; /*加载线程数据*/ case LC_THREAD: if (pass != 2) break; ret = load_thread((struct thread_command *)lcp, thread, result); break; /*加载unix线程数据*/ case LC_UNIXTHREAD: if (pass != 2) break; ret = load_unixthread( (struct thread_command *) lcp, thread, result); break; /*加载动态加载器 程序需要的dyld的路径*/ case LC_LOAD_DYLINKER: if (pass != 2) break; if ((depth == 1) && (dlp == 0)) { dlp = (struct dylinker_command *)lcp; dlarchbits = (header->cputype & CPU_ARCH_MASK); } else { ret = LOAD_FAILURE; } break; /*加载代码签名加载器*/ case LC_CODE_SIGNATURE: /* CODE SIGNING */ if (pass != 2) break; /* pager -> uip -> load signatures & store in uip set VM object "signed_pages" */ ret = load_code_signature( (struct linkedit_data_command *) lcp, vp, file_offset, macho_size, header->cputype, (depth == 1) ? result : NULL); if (ret != LOAD_SUCCESS) { printf("proc %d: load code signature error %d " "for file \"%s\"\n", p->p_pid, ret, vp->v_name); ret = LOAD_SUCCESS; /* ignore error */ } else { got_code_signatures = TRUE; } break; default: /* Other commands are ignored by the kernel */ ret = LOAD_SUCCESS; break; } if (ret != LOAD_SUCCESS) break; } if (ret != LOAD_SUCCESS) break; } //加载成功 if (ret == LOAD_SUCCESS) { if (! got_code_signatures) { structcs_blob *blob; /* no embedded signatures: look for detached ones */ blob = ubc_cs_blob_get(vp, -1, file_offset); if (blob != NULL) { /* get flags to be applied to the process */ result->csflags |= blob->csb_flags; } } //加载动态链接器dlp为从上面获取到的动态dyliner的路径 if (dlp != 0) //加载dylinker ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64); //...... } //..... return(ret); }
switch (header->filetype) { case MH_OBJECT: case MH_EXECUTE: case MH_PRELOAD: //depth=1 时候这里会走通 if (depth != 1) { return (LOAD_FAILURE); } break; case MH_FVMLIB: case MH_DYLIB: //depth=1 时候这里会失败 if (depth == 1) { return (LOAD_FAILURE); } break; case MH_DYLINKER: //depth=1 时候这里会失败 if (depth != 2) { return (LOAD_FAILURE); } break; default: return (LOAD_FAILURE); }
这时候如果Mach O 文件类型为 MH_OBJECT,MH_EXECUTE,MH_PRELOAD 就会走到下面的流程,下面的流程会先执行LC_SEGMENT_64,LC_SEGMENT将某些可执行的代码映射到指定的内存区域。然后再执行LC_THREAD,LC_UNIXTHREAD,LC_LOAD_DYLINKER,LC_CODE_SIGNATURE。这里最为关键的命令是LC_LOAD_DYLINKER,它会对dyld进行赋值。在最后会调用:
ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);
// Load the dynamic linker (dyld) named by an LC_LOAD_DYLINKER command.
// On success, marks the load result as dynlinker and records dyld's entry
// point, which is where the kernel will transfer control in user space.
// (Elided excerpt; elisions marked //....)
static
load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    integer_t               archbits,
    vm_map_t                map,
    thread_t                thread,
    int                     depth,
    load_result_t           *result,
    boolean_t               is_64bit
)
{
    //..........
    /*
     * First try to map dyld in directly. This should work most of
     * the time since there shouldn't normally be something already
     * mapped to its address.
     */
    // Parse dyld's own Mach-O file.
    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size, depth, &myresult);

    /*
     * If it turned out something was in the way, then we'll take
     * take this longer path to map dyld into a temporary map and
     * copy it into destination map at a different address.
     */

    // On success, record dyld's entry point in the load result.
    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);
}
/*
 * load_threadentry
 * Walks the flavor/size-prefixed thread-state entries in an LC_THREAD /
 * LC_UNIXTHREAD command payload and extracts the program entry point via
 * thread_entrypoint().
 *   thread      — target thread (passed through to thread_entrypoint).
 *   ts          — pointer to the raw thread-state words.
 *   total_size  — remaining payload size, in bytes.
 *   entry_point — out: the discovered entry point address.
 * Returns LOAD_SUCCESS, LOAD_BADMACHO (malformed entry size), or LOAD_FAILURE.
 * NOTE(review): the original paste had "unsignedlong" (a copy/paste artifact,
 * not a valid C type); restored to "unsigned long" per the XNU source.
 */
static load_return_t load_threadentry(
    thread_t          thread,
    unsigned long     *ts,
    unsigned long     total_size,
    mach_vm_offset_t  *entry_point
)
{
    //......
    /*
     * Set the thread state.
     */
    *entry_point = MACH_VM_MIN_ADDRESS;
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        // Each entry is: one flavor word + one size word + `size` state words.
        entry_size = (size + 2) * sizeof(unsigned long);
        if (entry_size > total_size)
            return (LOAD_BADMACHO);
        total_size -= entry_size;
        /*
         * Third argument is a kernel space pointer; it gets cast
         * to the appropriate type in thread_entrypoint() based on
         * the value of flavor.
         */
        ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
        if (ret != KERN_SUCCESS) {
            return (LOAD_FAILURE);
        }
        ts += size;   /* ts is a (unsigned long *) */
    }
    return (LOAD_SUCCESS);
}
kern_return_t thread_entrypoint( __unused thread_t thread, int flavor, thread_state_t tstate, __unused unsignedint count, mach_vm_offset_t *entry_point ) { /* * Set a default. */ if (*entry_point == 0) *entry_point = VM_MIN_ADDRESS;
switch (flavor) { case x86_THREAD_STATE32: { x86_thread_state32_t *state25;
#define LC_SEGMENT 0x1 /* segment of this file to be mapped */ #define LC_SEGMENT_64 0x19 /* 64-bit segment of this file to be mapped */ #define LC_SYMTAB 0x2 /* link-edit stab symbol table info */ #define LC_SYMSEG 0x3 /* link-edit gdb symbol table info (obsolete) */ #define LC_THREAD 0x4 /* thread */ #define LC_UNIXTHREAD 0x5 /* unix thread (includes a stack) */ #define LC_LOADFVMLIB 0x6 /* load a specified fixed VM shared library */ #define LC_IDFVMLIB 0x7 /* fixed VM shared library identification */ #define LC_IDENT 0x8 /* object identification info (obsolete) */ #define LC_FVMFILE 0x9 /* fixed VM file inclusion (internal use) */ #define LC_PREPAGE 0xa /* prepage command (internal use) */ #define LC_DYSYMTAB 0xb /* dynamic link-edit symbol table info */ #define LC_LOAD_DYLIB 0xc /* load a dynamically linked shared library */ #define LC_ID_DYLIB 0xd /* dynamically linked shared lib ident */ #define LC_LOAD_DYLINKER 0xe /* load a dynamic linker */ #define LC_ID_DYLINKER 0xf /* dynamic linker identification */ #define LC_PREBOUND_DYLIB 0x10 /* modules prebound for a dynamically */ /* linked shared library */ #define LC_ROUTINES 0x11 /* image routines */ #define LC_SUB_FRAMEWORK 0x12 /* sub framework */ #define LC_SUB_UMBRELLA 0x13 /* sub umbrella */ #define LC_SUB_CLIENT 0x14 /* sub client */ #define LC_SUB_LIBRARY 0x15 /* sub library */ #define LC_TWOLEVEL_HINTS 0x16 /* two-level namespace lookup hints */ #define LC_PREBIND_CKSUM 0x17 /* prebind checksum */ #define LC_LOAD_WEAK_DYLIB (0x18 | LC_REQ_DYLD)
#define LC_ROUTINES_64 0x1a /* 64-bit image routines */ #define LC_UUID 0x1b /* the uuid */ #define LC_RPATH (0x1c | LC_REQ_DYLD) /* runpath additions */ #define LC_CODE_SIGNATURE 0x1d /* local of code signature */ #define LC_SEGMENT_SPLIT_INFO 0x1e /* local of info to split segments */ #define LC_REEXPORT_DYLIB (0x1f | LC_REQ_DYLD) /* load and re-export dylib */
下面是通过otool命令来输出的load commands:
otool -l IDLFundation
Load command 20 cmd LC_LOAD_DYLIB cmdsize 80 name @rpath/FLAnimatedImage.framework/FLAnimatedImage (offset 24) time stamp 2 Thu Jan  1 08:00:02 1970 current version 1.0.0 compatibility version 1.0.0
-f print the fat headers -a print the archive header -h print the mach header -l print the load commands -L print shared libraries used -D print shared library id name -t print the text section (disassemble with -v) -x print all text sections (disassemble with -v) -p <routine name> start dissassemble from routine name -s <segname> <sectname> print contents of section -d print the data section -o print the Objective-C segment -r print the relocation entries -S print the table of contents of a library (obsolete) -T print the table of contents of a dynamic shared library (obsolete) -M print the module table of a dynamic shared library (obsolete) -R print the reference table of a dynamic shared library (obsolete) -I print the indirect symbol table -H print the two-level hints table (obsolete) -G print the data in code table -v print verbosely (symbolically) when possible -V print disassembled operands symbolically -c print argument strings of a core file -X printno leading addresses or headers -m don't use archive(member) syntax -B force Thumb disassembly (ARM objects only) -q use llvm's disassembler (the default) -Q use otool(1)'s disassembler -mcpu=arg use `arg' as the cpu for disassembly -j print opcode bytes -P print the info plist section as strings -C print linker optimization hints --version print the version of /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/otool
// callAlloc
// Fast path shared by +alloc and +allocWithZone:.  When neither the class
// nor its metaclass overrides allocWithZone: (checked via hasCustomAWZ()),
// allocation goes straight to the runtime allocator and skips objc_msgSend.
//   checkNil      — return nil for a nil class (the /*false*/ and /*true*/
//                   annotations below record the call-site values the
//                   article is discussing).
//   allocWithZone — whether the original message was allocWithZone:.
// NOTE(review): the original paste had "boolcheckNil", "boolallocWithZone",
// "[clsallocWithZone:nil]" and "[clsalloc]" — missing spaces from copy/paste;
// restored per the objc4 source.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil/*false*/, bool allocWithZone = false/*true*/)
{
    if (slowpath(checkNil && !cls)) return nil;

#if __OBJC2__
    // hasCustomAWZ() reports whether this class has a custom +allocWithZone:.
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        // No alloc/allocWithZone implementation. Go straight to the allocator.
        // fixme store hasCustomAWZ in the non-meta class and
        // add it to canAllocFast's summary
        if (fastpath(cls->canAllocFast())) {
            // No ctors, raw isa, etc. Go straight to the metal.
            bool dtor = cls->hasCxxDtor();
            id obj = (id)calloc(1, cls->bits.fastInstanceSize());
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            obj->initInstanceIsa(cls, dtor);
            return obj;
        }
        else {
            // Has ctor or raw isa or something. Use the slower path.
            id obj = class_createInstance(cls, 0);
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            return obj;
        }
    }
#endif

    // No shortcuts available: send the real message so user overrides run.
    if (allocWithZone) return [cls allocWithZone:nil];
    return [cls alloc];
}
do { transcribeToSideTable = false; oldisa = LoadExclusive(&isa.bits); newisa = oldisa; if (slowpath(!newisa.nonpointer)) { ClearExclusive(&isa.bits); if (!tryRetain && sideTableLocked) sidetable_unlock(); if (tryRetain) return sidetable_tryRetain() ? (id)this : nil; else return sidetable_retain(); } // don't check newisa.fast_rr; we already called any RR overrides if (slowpath(tryRetain && newisa.deallocating)) { ClearExclusive(&isa.bits); if (!tryRetain && sideTableLocked) sidetable_unlock(); return nil; } uintptr_t carry; newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++
if (slowpath(carry)) { // newisa.extra_rc++ overflowed if (!handleOverflow) { ClearExclusive(&isa.bits); return rootRetain_overflow(tryRetain); } // Leave half of the retain counts inline and // prepare to copy the other half to the side table. if (!tryRetain && !sideTableLocked) sidetable_lock(); sideTableLocked = true; transcribeToSideTable = true; newisa.extra_rc = RC_HALF; newisa.has_sidetable_rc = true; } } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));
if (slowpath(transcribeToSideTable)) { // Copy the other half of the retain counts to the side table. sidetable_addExtraRC_nolock(RC_HALF); }
if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock(); return (id)this; }
ALWAYS_INLINE bool objc_object::rootRelease(bool performDealloc, bool handleUnderflow) { if (isTaggedPointer()) return false;
bool sideTableLocked = false;
isa_t oldisa; isa_t newisa;
retry: do { oldisa = LoadExclusive(&isa.bits); newisa = oldisa; if (slowpath(!newisa.nonpointer)) { ClearExclusive(&isa.bits); if (sideTableLocked) sidetable_unlock(); return sidetable_release(performDealloc); } // don't check newisa.fast_rr; we already called any RR overrides uintptr_t carry; newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- if (slowpath(carry)) { // don't ClearExclusive() goto underflow; } } while (slowpath(!StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits)));
if (slowpath(sideTableLocked)) sidetable_unlock(); return false;
underflow: // newisa.extra_rc-- underflowed: borrow from side table or deallocate
// abandon newisa to undo the decrement newisa = oldisa;
if (slowpath(newisa.has_sidetable_rc)) { if (!handleUnderflow) { ClearExclusive(&isa.bits); return rootRelease_underflow(performDealloc); }
// Transfer retain count from side table to inline storage.
if (!sideTableLocked) { ClearExclusive(&isa.bits); sidetable_lock(); sideTableLocked = true; // Need to start over to avoid a race against // the nonpointer -> raw pointer transition. goto retry; }
// Try to remove some retain counts from the side table. size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);
// To avoid races, has_sidetable_rc must remain set // even if the side table count is now zero.
if (borrowed > 0) { // Side table retain count decreased. // Try to add them to the inline count. newisa.extra_rc = borrowed - 1; // redo the original decrement too bool stored = StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits); if (!stored) { // Inline update failed. // Try it again right now. This prevents livelock on LL/SC // architectures where the side table access itself may have // dropped the reservation. isa_t oldisa2 = LoadExclusive(&isa.bits); isa_t newisa2 = oldisa2; if (newisa2.nonpointer) { uintptr_t overflow; newisa2.bits = addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow); if (!overflow) { stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, newisa2.bits); } } }
if (!stored) { // Inline update failed. // Put the retains back in the side table. sidetable_addExtraRC_nolock(borrowed); goto retry; }
// Decrement successful after borrowing from side table. // This decrement cannot be the deallocating decrement - the side // table lock and has_sidetable_rc bit ensure that if everyone // else tried to -release while we worked, the last one would block. sidetable_unlock(); return false; } else { // Side table is empty after all. Fall-through to the dealloc path. } }
// Really deallocate.
if (slowpath(newisa.deallocating)) { ClearExclusive(&isa.bits); if (sideTableLocked) sidetable_unlock(); return overrelease_error(); // does not actually return } newisa.deallocating = true; if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
if (slowpath(sideTableLocked)) sidetable_unlock();
void *objc_destructInstance(id obj) { if (obj) { // Read all of the flags at once for performance. bool cxx = obj->hasCxxDtor(); bool assoc = obj->hasAssociatedObjects();
// This order is important. if (cxx) object_cxxDestruct(obj); if (assoc) _object_remove_assocations(obj); obj->clearDeallocating(); }
/*********************************************************************** * objc_destructInstance * Destroys an instance without freeing memory. * Calls C++ destructors. * Calls ARC ivar cleanup. * Removes associative references. * Returns `obj`. Does nothing if `obj` is nil. **********************************************************************/
/***********************************************************************
* _object_remove_assocations
* Removes every associated-object reference attached to `object`.
* The matching ObjectAssociationMap entries are copied out and erased
* while the AssociationsManager lock is held; the releaseValue() calls
* run only after the lock is dropped, so arbitrary -release side effects
* never execute under the associations lock.
* NOTE(review): the original paste had "idobject" (missing space);
* restored to "id object" per the objc4 source.
**********************************************************************/
void _object_remove_assocations(id object) {
    vector< ObjcAssociation, ObjcAllocator<ObjcAssociation> > elements;
    {
        AssociationsManager manager;
        AssociationsHashMap &associations(manager.associations());
        if (associations.size() == 0) return;
        disguised_ptr_t disguised_object = DISGUISE(object);
        AssociationsHashMap::iterator i = associations.find(disguised_object);
        if (i != associations.end()) {
            // copy all of the associations that need to be removed.
            ObjectAssociationMap *refs = i->second;
            for (ObjectAssociationMap::iterator j = refs->begin(), end = refs->end(); j != end; ++j) {
                elements.push_back(j->second);
            }
            // remove the secondary table.
            delete refs;
            associations.erase(i);
        }
    }
    // the calls to releaseValue() happen outside of the lock.
    for_each(elements.begin(), elements.end(), ReleaseValue());
}
清除剩余标识位
// clearDeallocating
// Final bookkeeping while an object is being deallocated: nils out any
// __weak references to it and clears its side-table state.
inline void objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        // A plain (non-optimized) isa pointer: all weak-reference and
        // retain-count state lives in the side table, so clear it there.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        // A non-pointer isa packs the "weakly referenced" and "has side-table
        // retain count" flags into the isa bits themselves, which is why it
        // needs separate handling — but the cleanup steps are the same:
        // set any weak referrers to nil, clear the side-table retain count,
        // and remove the entry from the weak table.
        clearDeallocating_slow();
    }

    // Both paths must leave no side-table entry behind for this object.
    assert(!sidetable_present());
}
clearDeallocating 中实际上是用于处理剩余的标识位,这里分成是否是non-pointer isa: non-pointer isa 对象之所以要和普通对象区分开是因为它的弱引用标记以及是否使用散列表信息都存在“指针”中,所以要分开处理,但是处理的流程都是一样的,都是查看是否被弱引用指针指向,如果有则将这些设置为nil。紧接着从散列表中清除引用计数。最后将弱引用项从弱引用表中移除。
Class previouslyInitializedClass = nil; id oldObj; SideTable *oldTable; SideTable *newTable;
//..........
// Clean up old value, if any. if (haveOld) { //__weak 修饰的指针变量已经指向过某对象 // 需要把这个对象和此指针变量的关联断开 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location); }
// Assign new value, if any. if (haveNew) { // 关联新对象和 __weak 修饰的指针变量 newObj = (objc_object *) weak_register_no_lock(&newTable->weak_table, (id)newObj, location, crashIfDeallocating); // weak_register_no_lock returns nil if weak store should be rejected
// Set is-weakly-referenced bit in refcount table. if (newObj && !newObj->isTaggedPointer()) { // 设置 isa 指针的 weakly_referenced 位 / sidetable 中的 SIDE_TABLE_WEAKLY_REFERENCED 位 // 标记此对象被 __weak 修饰的指针变量指向了,dealloc 时可以加速置 nil 处理 newObj->setWeaklyReferenced_nolock(); } // 设置 __weak 修饰的指针变量的值为 newObj // Do not set *location anywhere else. That would introduce a race. *location = (id)newObj; } else { // No new value. The storage is not changed. } SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
return (id)newObj; }
id weak_register_no_lock(weak_table_t *weak_table, id referent_id, id *referrer_id, bool crashIfDeallocating) { objc_object *referent = (objc_object *)referent_id; objc_object **referrer = (objc_object **)referrer_id;
if (!referent || referent->isTaggedPointer()) return referent_id;
// ensure that the referenced object is viable bool deallocating; if (!referent->ISA()->hasCustomRR()) { deallocating = referent->rootIsDeallocating(); } else { BOOL (*allowsWeakReference)(objc_object *, SEL) = (BOOL(*)(objc_object *, SEL)) object_getMethodImplementation((id)referent, SEL_allowsWeakReference); if ((IMP)allowsWeakReference == _objc_msgForward) { return nil; } deallocating = ! (*allowsWeakReference)(referent, SEL_allowsWeakReference); }
if (deallocating) { if (crashIfDeallocating) { _objc_fatal("Cannot form weak reference to instance (%p) of " "class %s. It is possible that this object was " "over-released, or is in the process of deallocation.", (void*)referent, object_getClassName((id)referent)); } else { return nil; } }
// now remember it and where it is being stored weak_entry_t *entry; if ((entry = weak_entry_for_referent(weak_table, referent))) { append_referrer(entry, referrer); } else { weak_entry_t new_entry(referent, referrer); weak_grow_maybe(weak_table); weak_entry_insert(weak_table, &new_entry); }
// Do not set *referrer. objc_storeWeak() requires that the // value not change.