
[v4,2/5] x86/sgx: Reduce the locking range in sgx_sanitize_section()

Message ID: 20210201132653.35690-3-tianjia.zhang@linux.alibaba.com
State: New
Series: Some optimizations related to sgx

Commit Message

tianjia.zhang Feb. 1, 2021, 1:26 p.m. UTC
The spin lock of sgx_epc_section only protects the page_list. Neither
the EREMOVE operation nor the access to init_laundry_list needs to be
inside its critical section. Reduce the locking range in
sgx_sanitize_section() so that the lock covers only the page_list
operations.
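
The resulting loop body looks as follows (the loop condition is an
assumption reconstructed from context, as it is not part of the diff
below):

	while (!list_empty(&section->init_laundry_list)) {
		if (kthread_should_stop())
			return;

		page = list_first_entry(&section->init_laundry_list,
					struct sgx_epc_page, list);

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret) {
			/* The lock now only guards the ->page_list update. */
			spin_lock(&section->lock);
			list_move(&page->list, &section->page_list);
			spin_unlock(&section->lock);
		} else
			list_move_tail(&page->list, &dirty);

		cond_resched();
	}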

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
---
 arch/x86/kernel/cpu/sgx/main.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

Comments

Jarkko Sakkinen Feb. 2, 2021, 10 p.m. UTC | #1
On Mon, Feb 01, 2021 at 09:26:50PM +0800, Tianjia Zhang wrote:
> The spin lock of sgx_epc_section only protects the page_list. Neither
> the EREMOVE operation nor the access to init_laundry_list needs to be
> inside its critical section. Reduce the locking range in
> sgx_sanitize_section() so that the lock covers only the page_list
> operations.
> 
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>

I'm not confident that this change has any practical value.

/Jarkko

> ---
>  arch/x86/kernel/cpu/sgx/main.c | 11 ++++-------
>  1 file changed, 4 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
> index c519fc5f6948..4465912174fd 100644
> --- a/arch/x86/kernel/cpu/sgx/main.c
> +++ b/arch/x86/kernel/cpu/sgx/main.c
> @@ -41,20 +41,17 @@ static void sgx_sanitize_section(struct sgx_epc_section *section)
>  		if (kthread_should_stop())
>  			return;
>  
> -		/* needed for access to ->page_list: */
> -		spin_lock(&section->lock);
> -
>  		page = list_first_entry(&section->init_laundry_list,
>  					struct sgx_epc_page, list);
>  
>  		ret = __eremove(sgx_get_epc_virt_addr(page));
> -		if (!ret)
> +		if (!ret) {
> +			spin_lock(&section->lock);
>  			list_move(&page->list, &section->page_list);
> -		else
> +			spin_unlock(&section->lock);
> +		} else
>  			list_move_tail(&page->list, &dirty);
>  
> -		spin_unlock(&section->lock);
> -
>  		cond_resched();
>  	}
>  
> -- 
> 2.19.1.3.ge56e4f7
> 
>
tianjia.zhang Feb. 11, 2021, 6:15 a.m. UTC | #2
On 2/3/21 6:00 AM, Jarkko Sakkinen wrote:
> On Mon, Feb 01, 2021 at 09:26:50PM +0800, Tianjia Zhang wrote:
>> The spin lock of sgx_epc_section only protects the page_list. Neither
>> the EREMOVE operation nor the access to init_laundry_list needs to be
>> inside its critical section. Reduce the locking range in
>> sgx_sanitize_section() so that the lock covers only the page_list
>> operations.
>>
>> Suggested-by: Sean Christopherson <seanjc@google.com>
>> Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
> 
> I'm not confident that this change has any practical value.
> 
> /Jarkko
> 

As sanitization only runs during initialization, the effect of this
optimization may indeed not be obvious. If possible, the critical
section could instead be moved outside to protect the entire while
loop, as in the untested sketch below.
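
A rough, untested sketch of that alternative (the loop condition and
the dirty list are assumed to match the current code; note that
cond_resched() would have to go, since we must not reschedule while
holding a spin lock):

	spin_lock(&section->lock);
	while (!list_empty(&section->init_laundry_list)) {
		if (kthread_should_stop())
			break;

		page = list_first_entry(&section->init_laundry_list,
					struct sgx_epc_page, list);

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret)
			list_move(&page->list, &section->page_list);
		else
			list_move_tail(&page->list, &dirty);
	}
	spin_unlock(&section->lock);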

Best regards,
Tianjia
>> ---
>>   arch/x86/kernel/cpu/sgx/main.c | 11 ++++-------
>>   1 file changed, 4 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
>> index c519fc5f6948..4465912174fd 100644
>> --- a/arch/x86/kernel/cpu/sgx/main.c
>> +++ b/arch/x86/kernel/cpu/sgx/main.c
>> @@ -41,20 +41,17 @@ static void sgx_sanitize_section(struct sgx_epc_section *section)
>>   		if (kthread_should_stop())
>>   			return;
>>   
>> -		/* needed for access to ->page_list: */
>> -		spin_lock(&section->lock);
>> -
>>   		page = list_first_entry(&section->init_laundry_list,
>>   					struct sgx_epc_page, list);
>>   
>>   		ret = __eremove(sgx_get_epc_virt_addr(page));
>> -		if (!ret)
>> +		if (!ret) {
>> +			spin_lock(&section->lock);
>>   			list_move(&page->list, &section->page_list);
>> -		else
>> +			spin_unlock(&section->lock);
>> +		} else
>>   			list_move_tail(&page->list, &dirty);
>>   
>> -		spin_unlock(&section->lock);
>> -
>>   		cond_resched();
>>   	}
>>   
>> -- 
>> 2.19.1.3.ge56e4f7
>>
>>

Patch

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index c519fc5f6948..4465912174fd 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -41,20 +41,17 @@ static void sgx_sanitize_section(struct sgx_epc_section *section)
 		if (kthread_should_stop())
 			return;
 
-		/* needed for access to ->page_list: */
-		spin_lock(&section->lock);
-
 		page = list_first_entry(&section->init_laundry_list,
 					struct sgx_epc_page, list);
 
 		ret = __eremove(sgx_get_epc_virt_addr(page));
-		if (!ret)
+		if (!ret) {
+			spin_lock(&section->lock);
 			list_move(&page->list, &section->page_list);
-		else
+			spin_unlock(&section->lock);
+		} else
 			list_move_tail(&page->list, &dirty);
 
-		spin_unlock(&section->lock);
-
 		cond_resched();
 	}