Memory management in OC controls the deallocation of objects by reference counting. In MRC, retain increases the reference count by 1, release decreases it by 1, and the object is freed when the reference count reaches 0

retain

Look at the source code

ALWAYS_INLINE id objc_object::rootRetain() { return rootRetain(false, false); } ALWAYS_INLINE bool objc_object::rootTryRetain() { return rootRetain(true, false) ? true : false; } ALWAYS_INLINE ID {//1. If (isTaggedPointer()) return (id)this; bool sideTableLocked = false; bool transcribeToSideTable = false; isa_t oldisa; isa_t newisa; // retain reference count handling // do {transcribeToSideTable = false; oldisa = LoadExclusive(&isa.bits); newisa = oldisa; // 2 If it is not nonpointer, the hash reference count table is processed ++ if (slowPath (! newisa.nonpointer)) { ClearExclusive(&isa.bits); if (rawISA()->isMetaClass()) return (id)this; if (! tryRetain && sideTableLocked) sidetable_unlock(); if (tryRetain) return sidetable_tryRetain() ? (id)this : nil; else return sidetable_retain(); } // don't check newisa.fast_rr; we already called any RR overrides //3. If destructing returns nil directly if (slowpath(tryRetain && newisa.deallocating) {ClearExclusive(& ISa.bits); if (! tryRetain && sideTableLocked) sidetable_unlock(); return nil; } // 1 uintptr_t carry; Bits = addC (newISa.bits, RC_ONE, 0, &carry); // Reference count +1 newisa.bits = addc(newISa.bits, RC_ONE, 0, &carry); // extra_rc++ //5 Recursively call if (slowPath (carry)) {// newisa.extra_rc++ overstep 3 learning map if (! handleOverflow) { ClearExclusive(&isa.bits); return rootRetain_overflow(tryRetain); } // Leave half of the retain counts inline and // prepare to copy the other half to the side table. if (! tryRetain && ! sideTableLocked) sidetable_lock(); sideTableLocked = true; transcribeToSideTable = true; newisa.extra_rc = RC_HALF; newisa.has_sidetable_rc = true; } } while (slowpath(! StoreExclusive(&isa.bits, oldisa.bits, newisa.bits))); if (slowpath(transcribeToSideTable)) { // Copy the other half of the retain counts to the side table. 
//6 Store half of the original reference count in extra_rc and the other half in the hash table, and set newisa.has_sideTABLE_rc to true sidetable_addExtraRC_nolock(RC_HALF); } if (slowpath(! tryRetain && sideTableLocked)) sidetable_unlock(); return (id)this; }Copy the code
id objc_object::sidetable_retain() { #if SUPPORT_NONPOINTER_ISA ASSERT(! isa.nonpointer); #endif SideTable& table = SideTables()[this]; table.lock(); size_t& refcntStorage = table.refcnts[this]; if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) { refcntStorage += SIDE_TABLE_RC_ONE; } table.unlock(); return (id)this; } bool objc_object::sidetable_tryRetain() { #if SUPPORT_NONPOINTER_ISA ASSERT(! isa.nonpointer); #endif SideTable& table = SideTables()[this]; // NO SPINLOCK HERE // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(), // which already acquired the lock on our behalf. // fixme can't do this efficiently with os_lock_handoff_s // if (table.slock == 0) { // _objc_fatal("Do not call -_tryRetain."); // } bool result = true; auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE); auto &refcnt = it.first->second; if (it.second) { // there was no entry } else if (refcnt & SIDE_TABLE_DEALLOCATING) { result = false; } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) { refcnt += SIDE_TABLE_RC_ONE; } return result; }Copy the code
  • First check whether the isa is a tagged pointer. If it is, return immediately — a tagged pointer does not use reference counting to maintain its lifecycle
  • Check if it is nonpointer ISA, and if it is not nonpointer, the hash table reference counts ++
  • Returns directly if destructor is in progress
  • If it is nonpointer ISA directly add the ISA identifier bit extra_rc++
  • If the reference count exceeds the range extra_rc can store, half of the count stays in extra_rc and the other half is moved to the hash (side) table

release

ALWAYS_INLINE bool objc_object::rootRelease(bool performDealloc, bool handleUnderflow) { if (isTaggedPointer()) return false; bool sideTableLocked = false; isa_t oldisa; isa_t newisa; retry: do { oldisa = LoadExclusive(&isa.bits); newisa = oldisa; if (slowpath(! newisa.nonpointer)) { ClearExclusive(&isa.bits); if (rawISA()->isMetaClass()) return false; if (sideTableLocked) sidetable_unlock(); return sidetable_release(performDealloc); } // don't check newisa.fast_rr; we already called any RR overrides uintptr_t carry; newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- if (slowpath(carry)) { // don't ClearExclusive() goto underflow; } } while (slowpath(! StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits))); if (slowpath(sideTableLocked)) sidetable_unlock(); return false; underflow: // newisa.extra_rc-- underflowed: borrow from side table or deallocate // abandon newisa to undo the decrement newisa = oldisa; if (slowpath(newisa.has_sidetable_rc)) { if (! handleUnderflow) { ClearExclusive(&isa.bits); return rootRelease_underflow(performDealloc); } // Transfer retain count from side table to inline storage. if (! sideTableLocked) { ClearExclusive(&isa.bits); sidetable_lock(); sideTableLocked = true; // Need to start over to avoid a race against // the nonpointer -> raw pointer transition. goto retry; } // Try to remove some retain counts from the side table. size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF); // To avoid races, has_sidetable_rc must remain set // even if the side table count is now zero. if (borrowed > 0) { // Side table retain count decreased. // Try to add them to the inline count. newisa.extra_rc = borrowed - 1; // redo the original decrement too bool stored = StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits); if (! stored) { // Inline update failed. // Try it again right now. This prevents livelock on LL/SC // architectures where the  side table access itself may have // dropped the reservation. 
isa_t oldisa2 = LoadExclusive(&isa.bits); isa_t newisa2 = oldisa2; if (newisa2.nonpointer) { uintptr_t overflow; newisa2.bits = addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow); if (! overflow) { stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, newisa2.bits); } } } if (! stored) { // Inline update failed. // Put the retains back in the side table. sidetable_addExtraRC_nolock(borrowed); goto retry; } // Decrement successful after borrowing from side table. // This decrement cannot be the deallocating decrement - the side // table lock and has_sidetable_rc bit ensure that if everyone // else tried to -release while we worked, the last one would block. sidetable_unlock(); return false; } else { // Side table is empty after all. Fall-through to the dealloc path. } } // Really deallocate. if (slowpath(newisa.deallocating)) { ClearExclusive(&isa.bits); if (sideTableLocked) sidetable_unlock(); return overrelease_error(); // does not actually return } newisa.deallocating = true; if (! StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; if (slowpath(sideTableLocked)) sidetable_unlock(); __c11_atomic_thread_fence(__ATOMIC_ACQUIRE); if (performDealloc) { ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc)); } return true; }Copy the code
  • Check if isTaggedPointer is an isTaggedPointer. TaggedPointer does not need to maintain a reference count.
  • If it is not a nonpointer isa, hand it to the hash table and decrement the reference count there; if the hash table's count reaches zero, send SEL_dealloc to the object, perform dealloc, and return
  • If it is a nonpointer isa, decrement extra_rc in the isa bits; the hash table is only touched when extra_rc underflows to zero
  • On underflow, if has_sidetable_rc is true, borrow RC_HALF counts from the hash table, store (borrowed - 1) into extra_rc, and attempt the store; if the store fails, put the borrowed counts back into the hash table and goto retry
  • If the isa is already deallocating, an over-release error is reported
  • If the reference count is 0, a dealloc message is sent on the object

Reference counting interview questions

Does alloc come out with a reference count of 1? Answer: No

// Right after alloc, the object's extra_rc is 0.
NSObject *objc = [NSObject alloc];
// Yet retainCount is reported as 1 — see rootRetainCount below.
NSLog(@"%ld", CFGetRetainCount((__bridge CFTypeRef)objc)); // Prints 1

Alloc doesn’t come out with a 1, so why print a 1?

// Computes the externally visible retain count:
// 1 (implicit) + extra_rc + any counts stored in the side table.
inline uintptr_t
objc_object::rootRetainCount()
{
    // A tagged pointer has no refcount; return the pointer value itself.
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        // extra_rc stores "count - 1": a freshly alloc'ed object has
        // extra_rc == 0 but its reported retainCount is 1 + 0 = 1.
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            // Add the overflow counts kept in the hash (side) table.
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    // Non-nonpointer isa: the whole count lives in the side table.
    sidetable_unlock();
    return sidetable_retainCount();
}

In uintptr_t rc = 1 + bits.extra_rc; the extra_rc is 0 at this point, but a base value of 1 is added, so the printed retainCount is 1

TaggedPointer

  1. Tagged Pointer is used to store small objects, such as NSNumber and NSDate

  2. The Tagged Pointer value is no longer an address, but a real value. So, it’s not really an object anymore, it’s just a normal variable in an object’s skin. Therefore, its memory is not stored in the heap and does not require malloc and free

  3. Three times more efficient at memory reads and 106 times faster at creation.

Here’s a taggedPointer interview question

- (void)viewDidLoad {
    [super viewDidLoad];
    self.queue = dispatch_queue_create("com.helloword.cn", DISPATCH_QUEUE_CONCURRENT);
    for (int i = 0; i < 10000; i++) {
        dispatch_async(self.queue, ^{
            // Short literal -> NSTaggedPointerString: retain/release are
            // no-ops, so the racing setters are harmless here.
            self.nameStr = [NSString stringWithFormat:@"helloword"];
            NSLog(@"%@", self.nameStr);
        });
    }
}

- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event {
    // The setter retains newValue and releases oldValue; under concurrency
    // that race can over-release — unless TaggedPointer short-circuits it.
    NSLog(@"coming");
    for (int i = 0; i < 10000; i++) {
        dispatch_async(self.queue, ^{
            // Longer string -> an ordinary NSString: concurrent setters can
            // over-release the old value and crash.
            self.nameStr = [NSString stringWithFormat:@"helloword_bug"];
            NSLog(@"%@", self.nameStr);
        });
    }
}

Running the program, everything is fine, but an error occurs when the screen is tapped. Why does the same code not crash in viewDidLoad but crash in touchesBegan? The first things to suspect are multithreading plus the setter/getter, so let's look at the assembly

Error reported between retain and release. So let’s look at the setter source code

static ALWAYS_INLINE void _object_setIvar(id obj, Ivar ivar, id value, bool assumeStrong) { if (! obj || ! ivar || obj->isTaggedPointer()) return; ptrdiff_t offset; objc_ivar_memory_management_t memoryManagement; _class_lookUpIvar(obj->ISA(), ivar, offset, memoryManagement); if (memoryManagement == objc_ivar_memoryUnknown) { if (assumeStrong) memoryManagement = objc_ivar_memoryStrong; else memoryManagement = objc_ivar_memoryUnretained; } id *location = (id *)((char *)obj + offset); switch (memoryManagement) { case objc_ivar_memoryWeak: objc_storeWeak(location, value); break; case objc_ivar_memoryStrong: objc_storeStrong(location, value); break; case objc_ivar_memoryUnretained: *location = value; break; case objc_ivar_memoryUnknown: _objc_fatal("impossible"); } } void object_setIvar(id obj, Ivar ivar, id value) { return _object_setIvar(obj, ivar, value, false /*not strong default*/); } void object_setIvarWithStrongDefault(id obj, Ivar ivar, id value) { return _object_setIvar(obj, ivar, value, true /*strong default*/); } void objc_storeStrong(id *location, id obj) { id prev = *location; if (obj == prev) { return; } objc_retain(obj); *location = obj; objc_release(prev); }Copy the code

As you can see, the setter releases the old value and retains the new one; under multithreading it is possible to release a value that has already been released, so an error is reported. That raises the question: why doesn't one of the two loops crash? The difference must lie in the nameStr property itself — the TaggedPointer effect

`nameStr` inside `viewDidLoad` is an `NSTaggedPointerString`, while inside `touchesBegan` it is an ordinary `NSString`. A `TaggedPointer` object returns early from retain/release, so it is never actually retained or released