Mirror of https://github.com/LuckfoxTECH/luckfox-pico.git (synced 2026-01-19 09:52:31 +01:00)
project:build.sh: Added fastboot support; custom modifications to U-Boot and the kernel are implemented as patches.
project:cfg:BoardConfig_IPC: Added a fastboot BoardConfig file and firmware post-scripts; split the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to work with the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W on Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated Buildroot's optional mirror sources, updated some software versions, and stored the frequently modified U-Boot and kernel device tree and configuration files separately.
sysdrv:source:mcu: Adopted a RISC-V MCU SDK running RT-Thread, mainly to initialize the camera AE during quick boot.
sysdrv:source:uboot: Added fastboot support; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to version 5.10.160; raised the NPU frequency for the RV1106G3; added fastboot support.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
@@ -126,10 +126,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
@@ -146,10 +147,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
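Both hunks above fix the same lock-ordering bug in the CMA heap's CPU-access hooks: buffer->vmap_cnt and buffer->vaddr are now read with buffer->lock already held, so a concurrent vmap/vunmap can no longer change the mapping between the check and the cache maintenance. A minimal userspace sketch of that check-under-lock pattern, using pthreads and hypothetical stand-in types (not code from this commit):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel objects. */
struct buffer {
	pthread_mutex_t lock;
	int vmap_cnt;           /* buffer->vmap_cnt in the diff */
	char *vaddr;            /* buffer->vaddr in the diff */
	size_t len;
};

static void flush_range(char *addr, size_t len)
{
	printf("flush %p..%p\n", (void *)addr, (void *)(addr + len));
}

/* The fixed ordering: take the lock *before* testing vmap_cnt, so a
 * concurrent unmap cannot invalidate vaddr between check and use. */
static void begin_cpu_access(struct buffer *buf)
{
	pthread_mutex_lock(&buf->lock);
	if (buf->vmap_cnt)
		flush_range(buf->vaddr, buf->len);
	/* ... walk the attachment list, still under the lock ... */
	pthread_mutex_unlock(&buf->lock);
}

int main(void)
{
	struct buffer b = { PTHREAD_MUTEX_INITIALIZER, 1, (char *)&b, sizeof(b) };
	begin_cpu_access(&b);
	return 0;
}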
@@ -11,10 +11,16 @@
 #include <linux/freezer.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <linux/sched/signal.h>
 #include "page_pool.h"
 
+struct dmabuf_page_pool_with_spinlock {
+	struct dmabuf_page_pool pool;
+	struct spinlock spinlock;
+};
+
 static LIST_HEAD(pool_list);
 static DEFINE_MUTEX(pool_list_lock);
 
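The new dmabuf_page_pool_with_spinlock wrapper bolts a spinlock onto the pool without changing the public struct dmabuf_page_pool: callers keep receiving a pointer to the embedded pool member, and the pool code recovers the wrapper with container_of(). A self-contained userspace sketch of that embedding pattern (hypothetical names, not from this commit):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool { int count; };          /* the public object */

struct pool_with_lock {              /* private wrapper */
	struct pool pool;            /* embedded member handed to callers */
	int lock;                    /* stands in for the spinlock */
};

int main(void)
{
	struct pool_with_lock *w = calloc(1, sizeof(*w));
	struct pool *p;

	if (!w)
		return 1;
	p = &w->pool;                /* what callers see */

	/* Recover the wrapper from the public pointer. */
	printf("round-trip ok: %d\n",
	       container_of(p, struct pool_with_lock, pool) == w);
	free(w);
	return 0;
}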
@@ -35,34 +41,41 @@ static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
 {
 	int index;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
 	if (PageHighMem(page))
 		index = POOL_HIGHPAGE;
 	else
 		index = POOL_LOWPAGE;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	list_add_tail(&page->lru, &pool->items[index]);
 	pool->count[index]++;
+	spin_unlock(&container_pool->spinlock);
 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 			    1 << pool->order);
-	mutex_unlock(&pool->mutex);
 }
 
 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
 	if (page) {
 		pool->count[index]--;
		list_del(&page->lru);
+		spin_unlock(&container_pool->spinlock);
 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 				    -(1 << pool->order));
+		goto out;
 	}
-	mutex_unlock(&pool->mutex);
+	spin_unlock(&container_pool->spinlock);
 
+out:
 	return page;
 }
 
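Both hot paths above now hold the spinlock only for the list splice and counter update, dropping it before the page-state accounting; dmabuf_page_pool_remove() converges its two unlock paths on a goto label. A userspace analogue of that shape, with hypothetical types (a plain linked list standing in for the page LRU):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

struct pool {
	pthread_spinlock_t lock;
	struct node *head;
};

/* Mirrors dmabuf_page_pool_remove(): only the list surgery runs under
 * the spinlock; the (comparatively slow) accounting runs after the
 * early unlock, and both paths meet at the out label. */
static struct node *pool_remove(struct pool *p)
{
	struct node *n;

	pthread_spin_lock(&p->lock);
	n = p->head;
	if (n) {
		p->head = n->next;
		pthread_spin_unlock(&p->lock);
		/* accounting (mod_node_page_state in the diff) goes here */
		goto out;
	}
	pthread_spin_unlock(&p->lock);
out:
	return n;
}

int main(void)
{
	struct pool p;
	struct node n = { NULL };

	pthread_spin_init(&p.lock, PTHREAD_PROCESS_PRIVATE);
	p.head = &n;
	printf("removed: %p\n", (void *)pool_remove(&p));
	pthread_spin_destroy(&p.lock);
	return 0;
}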
@@ -113,19 +126,25 @@ static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
 
 struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
-	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct dmabuf_page_pool *pool;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		kmalloc(sizeof(*container_pool), GFP_KERNEL);
 	int i;
 
-	if (!pool)
+	if (!container_pool)
 		return NULL;
 
+	spin_lock_init(&container_pool->spinlock);
+	pool = &container_pool->pool;
+
 	for (i = 0; i < POOL_TYPE_SIZE; i++) {
 		pool->count[i] = 0;
 		INIT_LIST_HEAD(&pool->items[i]);
 	}
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
-	mutex_init(&pool->mutex);
+	mutex_init(&pool->mutex); /* No longer used! */
+	mutex_lock(&pool->mutex); /* Make sure anyone who attempts to acquire this hangs */
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);
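Note the deliberate trap in the hunk above: the superseded mutex is still initialized, then locked once and never released, so any stale code path that still takes pool->mutex hangs immediately instead of racing silently against the new spinlock users. A userspace sketch of the same poisoning trick (hypothetical names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Init the legacy lock, take it once, never release it: a stale
 * caller that still does pthread_mutex_lock() blocks forever, which
 * is far easier to spot than a silent data race. */
static pthread_mutex_t legacy_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&legacy_lock);   /* held for the process lifetime */

	/* trylock now reports EBUSY, one way to detect a stale user. */
	if (pthread_mutex_trylock(&legacy_lock) == EBUSY)
		printf("legacy lock is poisoned as intended\n");
	return 0;
}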
@@ -138,6 +157,7 @@ EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool;
 	int i;
 
 	/* Remove us from the pool list */
@@ -151,7 +171,8 @@ void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 			dmabuf_page_pool_free_pages(pool, page);
 	}
 
-	kfree(pool);
+	container_pool = container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
+	kfree(container_pool);
 }
 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
 
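destroy now frees the wrapper recovered through container_of() instead of the embedded pool pointer. With pool as the first member the two addresses happen to coincide, so kfree(pool) would still work today, but freeing the outer object is the form that survives any future reordering of the struct. A tiny illustration with a deliberately reordered hypothetical wrapper:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct wrapper {
	int lock;            /* inner is deliberately NOT first here */
	struct inner pool;
};

int main(void)
{
	struct wrapper *w = malloc(sizeof(*w));
	struct inner *p;

	if (!w)
		return 1;
	p = &w->pool;

	/* free(p) would corrupt the heap in this layout; recovering
	 * the outer allocation first is always correct. */
	free(container_of(p, struct wrapper, pool));
	return 0;
}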
@@ -40,7 +40,7 @@ enum {
 struct dmabuf_page_pool {
 	int count[POOL_TYPE_SIZE];
 	struct list_head items[POOL_TYPE_SIZE];
-	struct mutex mutex;
+	struct mutex mutex; /* No longer used! */
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct list_head list;
@@ -135,7 +135,7 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
-static int
+static int __maybe_unused
 cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 					   enum dma_data_direction direction,
 					   unsigned int offset,
@@ -160,7 +160,7 @@ cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 	return 0;
 }
 
-static int
+static int __maybe_unused
 cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 					 enum dma_data_direction direction,
 					 unsigned int offset,
@@ -334,8 +334,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
 	.unmap_dma_buf = cma_heap_unmap_dma_buf,
 	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
 	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+#ifdef CONFIG_DMABUF_PARTIAL
 	.begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
 	.end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
+#endif
 	.mmap = cma_heap_mmap,
 	.vmap = cma_heap_vmap,
 	.vunmap = cma_heap_vunmap,
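The partial-access hooks stay compiled in every configuration, but they are only wired into the ops table when CONFIG_DMABUF_PARTIAL is set; the __maybe_unused annotations added above keep -Wunused-function quiet in builds without that option. A compact userspace sketch of the pattern, where FEATURE_PARTIAL is a hypothetical stand-in for the kernel config symbol:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Compiled unconditionally, referenced only under the #ifdef below. */
static int __maybe_unused begin_access_partial(unsigned int offset,
					       unsigned int len)
{
	printf("partial access: %u..%u\n", offset, offset + len);
	return 0;
}

struct ops {
	const char *name;
	int (*begin_partial)(unsigned int offset, unsigned int len);
};

static const struct ops buf_ops = {
	.name = "sketch_heap",
#ifdef FEATURE_PARTIAL
	.begin_partial = begin_access_partial,  /* only wired in here */
#endif
};

int main(void)
{
	if (buf_ops.begin_partial)              /* NULL when gated out */
		return buf_ops.begin_partial(0, 4096);
	return 0;
}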
@@ -58,12 +58,11 @@ struct dma_heap_attachment {
 	bool uncached;
 };
 
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
 #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
 			| __GFP_NORETRY) & ~__GFP_RECLAIM) \
 			| __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 /*
  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
  * to match with the sizes often found in IOMMUs. Using order 4 pages instead
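The GFP rework drops MID_ORDER_GFP entirely: the middle allocation order now uses HIGH_ORDER_GFP, meaning it opts out of reclaim and retries and fails silently, falling through to the next smaller order; LOW_ORDER_GFP also loses __GFP_COMP. Per the comment in the hunk the three orders correspond to 1MB, 64K and 4K chunks, i.e. orders 8, 4 and 0 with 4K pages (an inference from that comment, not spelled out in the diff). A sketch of how the flags pair with the orders, using illustrative bit values rather than the real GFP constants:

#include <stdio.h>

/* Illustrative stand-in bits; the real GFP values differ. */
#define GFP_HIGHUSER   (1u << 0)
#define __GFP_ZERO     (1u << 1)
#define __GFP_NOWARN   (1u << 2)
#define __GFP_NORETRY  (1u << 3)
#define __GFP_RECLAIM  (1u << 4)
#define __GFP_COMP     (1u << 5)

/* The post-patch definitions from the hunk above. */
#define LOW_ORDER_GFP  (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
			| __GFP_NORETRY) & ~__GFP_RECLAIM) \
			| __GFP_COMP)

static const unsigned int order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP,
					   LOW_ORDER_GFP};
static const unsigned int orders[] = {8, 4, 0};   /* 1MB, 64K, 4K (assumed) */

int main(void)
{
	/* After the patch a 64K (order-4) attempt no longer stalls in
	 * reclaim; on failure the allocator simply drops to order 0. */
	for (int i = 0; i < 3; i++)
		printf("order %u -> gfp 0x%x\n", orders[i], order_flags[i]);
	return 0;
}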
@@ -266,7 +265,7 @@ static int system_heap_sgl_sync_range(struct device *dev,
 	return 0;
 }
 
-static int
+static int __maybe_unused
 system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 					     enum dma_data_direction direction,
 					     unsigned int offset,
@@ -296,7 +295,7 @@ system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 	return ret;
 }
 
-static int
+static int __maybe_unused
 system_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 					   enum dma_data_direction direction,
 					   unsigned int offset,
@@ -479,8 +478,10 @@ static const struct dma_buf_ops system_heap_buf_ops = {
 	.unmap_dma_buf = system_heap_unmap_dma_buf,
 	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
 	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
+#ifdef CONFIG_DMABUF_PARTIAL
 	.begin_cpu_access_partial = system_heap_dma_buf_begin_cpu_access_partial,
 	.end_cpu_access_partial = system_heap_dma_buf_end_cpu_access_partial,
+#endif
 	.mmap = system_heap_mmap,
 	.vmap = system_heap_vmap,
 	.vunmap = system_heap_vunmap,
@@ -49,12 +49,11 @@ struct dma_heap_attachment {
 	bool uncached;
 };
 
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
 #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
 			| __GFP_NORETRY) & ~__GFP_RECLAIM) \
 			| __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 /*
  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
  * to match with the sizes often found in IOMMUs. Using order 4 pages instead