Barrier function

An example:

- (void)demo2{
    
  dispatch_queue_t concurrentQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
  /* 1. Asynchronous function */
  dispatch_async(concurrentQueue, ^{
      NSLog(@"123");
     
  });
  
  dispatch_async(concurrentQueue, ^{
      NSLog(@"456");
      
  });
  
  /* 2. Barrier function */ // barrier the queue
  dispatch_barrier_async(concurrentQueue, ^{
      NSLog(@"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - % @ -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --",[NSThread currentThread]);
  });
  /* 3. Asynchronous function */
  dispatch_async(concurrentQueue, ^{
      NSLog(@"Load so much, take a breath!!");
  });
  NSLog(@"********** dry up!!");

}

Output result:

123
456
********** dry up!!
---------------------<NSThread: 0x600002344780>{number = 6, name = (null)}---------------------
Load so much, take a breath!!

dispatch_barrier_async holds back the tasks that are added to concurrentQueue after it until the block inside the barrier has executed.

Now change dispatch_barrier_async to dispatch_barrier_sync:

123
456
---------------------<NSThread: 0x600002344780>{number = 6, name = (null)}---------------------
********** dry up!!
Load so much, take a breath!!

From the example above we can see that dispatch_barrier_async does not run its block until the tasks submitted before it have completed, while dispatch_barrier_sync additionally blocks the current thread and therefore delays the statements that follow it. The barrier function can only control tasks in the same queue, and that queue must be a custom one: the global concurrent queue cannot be barriered.
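To see that limitation, swap the global queue into the demo. This is a minimal sketch of my own (not from the original post): on a global concurrent queue the barrier degenerates into an ordinary async submission, so its block no longer waits for the earlier tasks.

- (void)demoGlobalBarrier{
    dispatch_queue_t globalQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_async(globalQueue, ^{
        NSLog(@"123");
    });
    dispatch_async(globalQueue, ^{
        NSLog(@"456");
    });
    // On a global queue this behaves just like dispatch_async: no barrier effect.
    dispatch_barrier_async(globalQueue, ^{
        NSLog(@"barrier block");
    });
    dispatch_async(globalQueue, ^{
        NSLog(@"after the barrier");
    });
}

With that behavior in mind, let's see how the synchronous barrier is implemented.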

dispatch_barrier_sync source code analysis

void
dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
{
	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
	if (unlikely(_dispatch_block_has_private_data(work))) {
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

dispatch_barrier_sync calls _dispatch_barrier_sync_f, which in turn wraps _dispatch_barrier_sync_f_inline:

static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// The more correct thing to do would be to merge the qos of the thread
	// that just acquired the barrier lock into the queue state.
	//
	// However this is too expensive for the fast path, so skip doing it.
	// The chosen tradeoff is that if an enqueue on a lower priority thread
	// contends with this fast path, this thread may receive a useless override.
	//
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

Add a symbolic breakpoint and we end up in _dispatch_sync_f_slow:

DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
		dispatch_function_t func, uintptr_t top_dc_flags,
		dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
	...
	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func, top_dc_flags
			DISPATCH_TRACE_ARG(&dsc));
}

static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
		void *ctxt, dispatch_function_t func, uintptr_t dc_flags
		DISPATCH_TRACE_ARG(void *dc))
{
	_dispatch_sync_function_invoke_inline(dq, ctxt, func);
	_dispatch_trace_item_complete(dc);
	_dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}

_dispatch_sync_invoke_and_complete_recurse then calls _dispatch_sync_complete_recurse, which runs a do..while loop: when the barrier flag is set it calls dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE), otherwise it calls _dispatch_lane_non_barrier_complete. The wakeup lets the tasks waiting in the queue start executing. dx_wakeup is a macro that dispatches through the queue's vtable, and the custom concurrent queue and the global root queue install different dq_wakeup implementations:

static void
_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, uintptr_t dc_flags)
{
	bool barrier = (dc_flags & DC_FLAG_BARRIER);
	do {
		if (dq == stop_dq) return;
		if (barrier) {
			dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
		} else {
			_dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0);
		}
		dq = dq->do_targetq;
		barrier = (dq->dq_width == 1);
	} while (unlikely(dq->do_targetq));
}
#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z)
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
	.do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);

A custom concurrent queue's dq_wakeup is _dispatch_lane_wakeup, while the global concurrent queue's is _dispatch_root_queue_wakeup. In _dispatch_lane_wakeup, when flags contains DISPATCH_WAKEUP_BARRIER_COMPLETE, it goes into _dispatch_lane_barrier_complete:

void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
		return _dispatch_lane_barrier_complete(dqu, qos, flags);
	}
	if (_dispatch_queue_class_probe(dqu)) {
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}
	return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

_dispatch_lane_barrier_complete inspects the head of the queue: if the queue is serial or the head item is a barrier, and that item is a waiter, it goes into _dispatch_lane_drain_barrier_waiter; if the queue is concurrent and the head item is not a barrier, it drains the non-barrier items via _dispatch_lane_drain_non_barriers. Either way, the work waiting behind the barrier is woken up and the barrier state is re-marked.


static void
_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
	dispatch_lane_t dq = dqu._dl;

	if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
		struct dispatch_object_s *dc = _dispatch_queue_get_head(dq);
		if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) {
			if (_dispatch_object_is_waiter(dc)) {
				return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0);
			}
		} else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) {
			return _dispatch_lane_drain_non_barriers(dq, dc, flags);
		}

		if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) {
			_dispatch_retain_2(dq);
			flags |= DISPATCH_WAKEUP_CONSUME_2;
		}
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}

	uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
			dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
	return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned);
}

_dispatch_lane_drain_barrier_waiter then calls _dispatch_barrier_waiter_redirect_or_wake, passing in the old and new queue states:

static void
_dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq,
		struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags,
		uint64_t enqueued_bits)
{
	dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
	struct dispatch_object_s *next_dc;
	uint64_t next_owner = 0, old_state, new_state;

	next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter);
	next_dc = _dispatch_queue_pop_head(dq, dc);

transfer_lock_again:
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
		if (unlikely(_dq_state_needs_ensure_ownership(old_state))) {
			_dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq);
			_dispatch_queue_move_to_contended_sync(dq->_as_dq);
			os_atomic_rmw_loop_give_up(goto transfer_lock_again);
		}

		new_state  = old_state;
		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
		new_state &= ~DISPATCH_QUEUE_DIRTY;
		new_state |= next_owner;

		if (_dq_state_is_base_wlh(old_state)) {
			if (next_dc) {
				// we know there's a next item, keep the enqueued bit if any
			} else if (unlikely(_dq_state_is_dirty(old_state))) {
				os_atomic_rmw_loop_give_up({
					os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
					next_dc = os_atomic_load2o(dq, dq_items_head, relaxed);
					goto transfer_lock_again;
				});
			} else {
				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
				new_state &= ~DISPATCH_QUEUE_ENQUEUED;
			}
		} else {
			new_state -= enqueued_bits;
		}
	});

	return _dispatch_barrier_waiter_redirect_or_wake(dq, dc, flags,
			old_state, new_state);
}
DISPATCH_NOINLINE
static void
_dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu,
		dispatch_object_t dc, dispatch_wakeup_flags_t flags,
		uint64_t old_state, uint64_t new_state)
{
	...
	// re-mark
	if (unlikely(_dq_state_is_inner_queue(old_state))) {
		dispatch_queue_t tq = dq->do_targetq;
		if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) {
			_dispatch_async_waiter_update(dsc, dq);
		}
		if (likely(tq->dq_width == 1)) {
			dsc->dc_flags |= DC_FLAG_BARRIER;
		} else {
			dispatch_lane_t dl = upcast(tq)._dl;
			dsc->dc_flags &= ~DC_FLAG_BARRIER;
			if (_dispatch_queue_try_reserve_sync_width(dl)) {
				return _dispatch_non_barrier_waiter_redirect_or_wake(dl, dc);
			}
		}
		// passing the QoS of `dq` helps pushing on low priority waiters with
		// legacy workloops.
		dsc->dsc_from_async = false;
		// push layer by layer (to the target queue)
		return dx_push(tq, dsc, _dq_state_max_qos(old_state));
	}
	...
	return _dispatch_waiter_wake(dsc, wlh, old_state, new_state);
}


Once everything queued ahead of the barrier has completed and no barrier remains, the block inside the barrier is executed via the following call chain:

  • _dispatch_sync_complete_recurse
  • _dispatch_lane_barrier_complete
  • _dispatch_lane_class_barrier_complete

Let’s look at the global concurrent queue

void
_dispatch_root_queue_wakeup(dispatch_queue_global_t dq, DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
{
	if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
		DISPATCH_INTERNAL_CRASH(dq->dq_priority,
				"Don't try to wake up or override a root queue");
	}
	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
		return _dispatch_release_2_tailcall(dq);
	}
}

There is no barrier handling in it, so the barrier function is only useful on custom queues. From the source above, the barrier waits for the tasks ahead of it in the queue; once they finish, the barrier block executes, and when the barrier completes the queue state is re-marked. In other words, the barrier blocks the queue. Besides the tasks the user submits, the global concurrent queue also handles many system events; if a barrier could block it, unexpected behavior could occur. That is why a barrier cannot block a global concurrent queue.
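Before moving on, here is the classic application of the barrier on a custom concurrent queue: a multi-read, single-write container. This is a sketch of my own (the class name, queue label, and storage are assumptions), not code from the post or from libdispatch.

@interface KCSafeDictionary : NSObject
- (id)objectForKey:(NSString *)key;
- (void)setObject:(id)obj forKey:(NSString *)key;
@end

@implementation KCSafeDictionary {
    dispatch_queue_t _isolationQueue;
    NSMutableDictionary *_storage;
}

- (instancetype)init {
    if (self = [super init]) {
        _isolationQueue = dispatch_queue_create("com.demo.isolation", DISPATCH_QUEUE_CONCURRENT);
        _storage = [NSMutableDictionary dictionary];
    }
    return self;
}

- (id)objectForKey:(NSString *)key {
    __block id result = nil;
    // Reads can run concurrently with each other.
    dispatch_sync(_isolationQueue, ^{
        result = self->_storage[key];
    });
    return result;
}

- (void)setObject:(id)obj forKey:(NSString *)key {
    // The barrier waits for in-flight reads, runs exclusively, then lets reads resume.
    dispatch_barrier_async(_isolationQueue, ^{
        self->_storage[key] = obj;
    });
}
@end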

Semaphore

As we know, the barrier function can only block its own custom queue. We sometimes use third-party libraries, and if we want to coordinate work on a queue we do not own, the barrier function no longer helps. The commonly used semaphore functions are:

  • dispatch_semaphore_create: create a semaphore
  • dispatch_semaphore_wait: wait on (decrement) the semaphore
  • dispatch_semaphore_signal: release (increment) the semaphore
  dispatch_semaphore_t semap = dispatch_semaphore_create(2);
    // Task 1
    dispatch_async(queue, ^{
        dispatch_semaphore_wait(semap, DISPATCH_TIME_FOREVER);
        NSLog(@"Mission 1");
        sleep(1);
        NSLog(@"Task 1 completed.");
        dispatch_semaphore_signal(semap);
    });
    
    // Task 2
    dispatch_async(queue, ^{
        dispatch_semaphore_signal(semap);
      
        NSLog(@"Mission 2");
        sleep(1);
        NSLog(@"Mission 2 completed.");

    });

With dispatch_semaphore_t semap = dispatch_semaphore_create(2); the maximum concurrency is limited to 2: whenever the semaphore value drops below zero, dispatch_semaphore_wait blocks.
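Here is a minimal sketch of my own showing how that initial value of 2 throttles work, so at most two of the submitted blocks run at the same time:

    dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
    dispatch_semaphore_t sema = dispatch_semaphore_create(2);
    for (int i = 0; i < 6; i++) {
        dispatch_async(queue, ^{
            // Once two tasks are running, the value is below zero and this blocks.
            dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
            NSLog(@"task %d started", i);
            sleep(1);
            NSLog(@"task %d finished", i);
            dispatch_semaphore_signal(sema);
        });
    }

Now let's see how dispatch_semaphore_wait is implemented underneath: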

intptr_t
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{       
	long value = os_atomic_dec2o(dsema, dsema_value, acquire);
	if (likely(value >= 0)) {
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}

os_atomic_dec2o atomically decrements dsema_value and returns the new value. If the new value is >= 0, the wait returns 0 immediately; if it is less than 0, _dispatch_semaphore_wait_slow is executed.
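os_atomic_dec2o is one of libdispatch's atomic macros; conceptually (a plain C11 sketch of my own, not the real expansion) it behaves like this:

#include <stdatomic.h>

// Conceptual stand-in for os_atomic_dec2o(dsema, dsema_value, acquire):
// atomically decrement the field and return the *new* value.
static long conceptual_dec(_Atomic long *value)
{
    // atomic_fetch_sub_explicit returns the old value, so subtract 1.
    return atomic_fetch_sub_explicit(value, 1, memory_order_acquire) - 1;
}

The slow path looks like this: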


  static intptr_t
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	switch (timeout) {
	default:
		if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		orig = dsema->dsema_value;
		while (orig < 0) {
			if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1,
					&orig, relaxed)) {
				return _DSEMA4_TIMEOUT();
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		_dispatch_sema4_wait(&dsema->dsema_sema);
		break;
	}
	return 0;
}

We usually pass DISPATCH_TIME_FOREVER, which takes the DISPATCH_TIME_FOREVER case and ends up in _dispatch_sema4_wait:

void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	int ret = 0;
	do {
		ret = sem_wait(sema);
	} while (ret == -1 && errno == EINTR);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}

Inside is a do..while loop that keeps calling sem_wait(sema). So when we call dispatch_semaphore_wait(semap, DISPATCH_TIME_FOREVER); and the semaphore value is below zero, we effectively sit inside that do..while loop, waiting.
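A common use of this blocking behavior is turning an asynchronous call into a synchronous one. A minimal sketch of my own, where fetchDataWithCompletion: is a hypothetical callback-based API:

- (NSData *)fetchDataSynchronously {
    __block NSData *result = nil;
    dispatch_semaphore_t sema = dispatch_semaphore_create(0);
    // fetchDataWithCompletion: stands in for any callback-based API.
    [self fetchDataWithCompletion:^(NSData *data) {
        result = data;
        dispatch_semaphore_signal(sema); // value goes 0 -> 1 and wakes the waiter
    }];
    // Value goes 0 -> -1 here, so the do..while around sem_wait blocks.
    dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
    return result;
}

The wait only returns once another thread calls dispatch_semaphore_signal, whose implementation is: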

intptr_t
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	long value = os_atomic_inc2o(dsema, dsema_value, release);
	if (likely(value > 0)) {
		return 0;
	}
	if (unlikely(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH(value,
				"Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}

os_atomic_inc2o performs a +1 operation. If the resulting value is greater than 0, dispatch_semaphore_signal simply returns 0. If the value equals LONG_MIN, the program crashes with "Unbalanced call to dispatch_semaphore_signal()", meaning signal was called more times than wait. Otherwise _dispatch_semaphore_signal_slow runs to wake up a waiter.

DISPATCH_NOINLINE
intptr_t
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	_dispatch_sema4_signal(&dsema->dsema_sema, 1);
	return 1;
}

Scheduling group

The commonly used scheduling group APIs are as follows:

  • dispatch_group_create: create a group
  • dispatch_group_async: submit a task that belongs to the group
  • dispatch_group_notify: get notified when all tasks in the group have finished
  • dispatch_group_wait: wait, with a timeout, for the tasks in the group to finish
  • dispatch_group_enter: enter the group
  • dispatch_group_leave: leave the group

For example:

- (void)groupDemo{

    self.group = dispatch_group_create();
    dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
    dispatch_group_async(_group, queue, ^{
        // Download image 1
        NSString *logoStr1 = @"https://f12.baidu.com/it/u=711217113,818398466&fm=72";
        NSData *data1 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr1]];
        [self.mArray addObject:data1];
    });


    dispatch_group_async( self.group , queue, ^{
        // Download image 2
       NSString *logoStr2 = @"https://f12.baidu.com/it/u=3172787957,1000491180&fm=72";
        NSData *data2 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr2]];
        [self.mArray addObject:data2];
    });
    
     // enter and leave must appear in pairs
   
    dispatch_group_enter( self.group );
    dispatch_async(queue, ^{
        // Download another image
       NSString *logoStr2 = @"https://f12.baidu.com/it/u=3172787957,1000491180&fm=72";
        NSData *data2 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr2]];
        [self.mArray addObject:data2];
       
    });
    
// long time = dispatch_group_wait(group, 1);
//
// if (time == 0) {
//
// }
    
    
    dispatch_group_notify( self.group , dispatch_get_main_queue(), ^{
        UIImage *newImage = nil;
       NSLog(@"Number of arrays :%ld",self.mArray.count);

    });

}

- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event {
    
    dispatch_group_leave( self.group );
    
}

When the screen is tapped, the console prints: Number of arrays :3
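The dispatch_group_wait call commented out in the demo is the other way to synchronize with a group: it blocks the current thread until every task in the group has finished or the timeout expires. A minimal sketch of my own:

    dispatch_group_t group = dispatch_group_create();
    dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
    dispatch_group_async(group, queue, ^{
        sleep(1);
        NSLog(@"task done");
    });
    // Block for at most 2 seconds; returns 0 only if the group emptied in time.
    long result = dispatch_group_wait(group, dispatch_time(DISPATCH_TIME_NOW, (int64_t)(2 * NSEC_PER_SEC)));
    if (result == 0) {
        NSLog(@"all tasks completed in time");
    } else {
        NSLog(@"timed out waiting for the group");
    }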

We can see that dispatch_group_enter plus dispatch_group_leave is equivalent to dispatch_group_async, and dispatch_group_notify runs once they have all completed. Older versions of the source implemented the group on top of a semaphore, but newer versions (after 818) rewrote it; the implementation is still very similar to the semaphore. Source code analysis:

dispatch_group_t
dispatch_group_create(void)
{
	return _dispatch_group_create_with_count(0);
}

So let’s look at _dispatch_group_create_with_count

static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
	dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
			sizeof(struct dispatch_group_s));
	dg->do_next = DISPATCH_OBJECT_LISTLESS;
	dg->do_targetq = _dispatch_get_default_queue(false);
	if (n) {
		os_atomic_store2o(dg, dg_bits,
				(uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
		os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
	}
	return dg;
}

So let’s look at dispatch_group_enter

void
dispatch_group_enter(dispatch_group_t dg)
{
	// The value is decremented on a 32bits wide atomic so that the carry
	// for the 0 -> -1 transition is not propagated to the upper 32bits.
	uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
			DISPATCH_GROUP_VALUE_INTERVAL, acquire);
	uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
	if (unlikely(old_value == 0)) {
		_dispatch_retain(dg); // <rdar://problem/22318411>
	}
	if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
		DISPATCH_CLIENT_CRASH(old_bits,
				"Too many nested calls to dispatch_group_enter()"); }}Copy the code

And dispatch_group_leave


void
dispatch_group_leave(dispatch_group_t dg)
{
	// The value is incremented on a 64bits wide atomic so that the carry for
	// the -1 -> 0 transition increments the generation atomically.
	uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
			DISPATCH_GROUP_VALUE_INTERVAL, release);
	uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);

	if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
		old_state += DISPATCH_GROUP_VALUE_INTERVAL;
		do {
			new_state = old_state;
			if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
				new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			} else {
				// If the group was entered again since the atomic_add above,
				// we can't clear the waiters bit anymore as we don't know for
				// which generation the waiters are for
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			}
			if (old_state == new_state) break;
		} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
				old_state, new_state, &old_state, relaxed)));
		return _dispatch_group_wake(dg, old_state, true);
	}

	if (unlikely(old_value == 0)) {
		DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
				"Unbalanced call to dispatch_group_leave()"); }}Copy the code

In dispatch_group_enter, os_atomic_sub_orig2o decrements the group value (0 -> -1); in dispatch_group_leave, os_atomic_add_orig2o increments it back (-1 -> 0). Since #define DISPATCH_GROUP_VALUE_1 DISPATCH_GROUP_VALUE_MASK, when old_value == DISPATCH_GROUP_VALUE_1 the leave path bumps old_state by DISPATCH_GROUP_VALUE_INTERVAL and calls _dispatch_group_wake(dg, old_state, true), which wakes the blocks registered via dispatch_group_notify.

So dispatch_group_enter and dispatch_group_leave gate the execution of the block registered with _dispatch_group_notify:

static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_continuation_t dsn)
{
	uint64_t old_state, new_state;
	dispatch_continuation_t prev;

	dsn->dc_data = dq;
	_dispatch_retain(dq);

	prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
	os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) {
		os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
			new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
			if ((uint32_t)old_state == 0) {
				os_atomic_rmw_loop_give_up({
					return _dispatch_group_wake(dg, new_state, false);
				});
			}
		});
	}
}

If old_state == 0, _dispatch_group_wake is executed here as well. There is also a wake inside dispatch_group_leave. dispatch_group_notify binds the block to the group, so with asynchronous tasks dispatch_group_notify may well run before dispatch_group_leave has been called; the block cannot fire yet because the group count has not returned to zero. When dispatch_group_leave finally runs, its own wake fires, and since the block has already been bound to the group, it gets invoked then.
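This enter/leave pairing is exactly what manual grouping is for: when the task itself is asynchronous (for example a network request from a third-party library), dispatch_group_async cannot track its completion, but enter/leave around the callback can. A sketch of my own, where downloadImage:completion:, url1, url2 and self.mArray are assumptions carried over from the earlier demo:

    dispatch_group_t group = dispatch_group_create();

    dispatch_group_enter(group);   // value: 0 -> -1
    // downloadImage:completion: stands in for any callback-based API.
    [self downloadImage:url1 completion:^(UIImage *image) {
        [self.mArray addObject:image];
        dispatch_group_leave(group);   // value: -1 -> 0, may trigger the wake
    }];

    dispatch_group_enter(group);
    [self downloadImage:url2 completion:^(UIImage *image) {
        [self.mArray addObject:image];
        dispatch_group_leave(group);
    }];

    dispatch_group_notify(group, dispatch_get_main_queue(), ^{
        NSLog(@"both downloads finished, count: %lu", (unsigned long)self.mArray.count);
    });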

Why is dispatch_group_async equivalent to dispatch_group_enter plus dispatch_group_leave? Are they encapsulated at the bottom layer? Let's look at the source code:

void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
	dispatch_qos_t qos;

	qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
	_dispatch_continuation_group_async(dg, dq, dc, qos);
}

So let’s look at _dispatch_continuation_group_async

static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_continuation_t dc, dispatch_qos_t qos)
{
	dispatch_group_enter(dg);
	dc->dc_data = dg;
	_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

Where is dispatch_group_leave? It should come after the block has finished executing. Continuing to trace the source through a series of calls, we end up in the following function:

static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou,
		dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
	...
		if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) {
			_dispatch_continuation_with_group_invoke(dc);
		} else {
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_trace_item_complete(dc);
		}
		if (unlikely(dc1)) {
			_dispatch_continuation_free_to_cache_limit(dc1);
		}
	});
	_dispatch_perfmon_workitem_inc();
}

_dispatch_continuation_with_group_invoke is implemented as follows:

static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = dc->dc_data;
	unsigned long type = dx_type(dou);
	if (type == DISPATCH_GROUP_TYPE) {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
		_dispatch_trace_item_complete(dc);
		dispatch_group_leave((dispatch_group_t)dou);
	} else {
		DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
	}
}

Therefore, dispatch_group_async does indeed encapsulate dispatch_group_enter and dispatch_group_leave.
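In other words, the two forms below behave the same; a sketch of my own, matching what the source walk-through above shows:

    dispatch_group_t group = dispatch_group_create();
    dispatch_queue_t queue = dispatch_get_global_queue(0, 0);

    // Form 1: the convenience wrapper.
    dispatch_group_async(group, queue, ^{
        NSLog(@"work");
    });

    // Form 2: what dispatch_group_async does underneath.
    dispatch_group_enter(group);
    dispatch_async(queue, ^{
        NSLog(@"work");
        dispatch_group_leave(group);
    });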