project:build.sh: Added fastboot support; custom U-Boot and kernel modifications are now applied as patches.
project:cfg:BoardConfig_IPC: Added a fastboot BoardConfig file and firmware post-scripts; split the BoardConfigs for the Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to match the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W on Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated the optional Buildroot mirror sources, updated some software versions, and now stores the frequently modified U-Boot and kernel device tree and configuration files separately.
sysdrv:source:mcu: Switched to the RISC-V MCU SDK running RT-Thread, mainly to initialize the camera AE during quick boot.
sysdrv:source:uboot: Added fastboot support; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to 5.10.160; raised the NPU frequency on the RV1106G3; added fastboot support.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
Author: luckfox-eng29 <eng29@luckfox.com>
Date:   2024-08-21 10:05:47 +08:00
Commit: 8f34c2760d (parent e79fd21975)

20902 changed files with 6567362 additions and 11248383 deletions

sysdrv/source/kernel/drivers/xen/features.c

@@ -29,6 +29,6 @@ void xen_setup_features(void)
 		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
 			break;
 		for (j = 0; j < 32; j++)
-			xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+			xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
 	}
 }
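The change above swaps 1<<j for 1U << j: when j reaches 31, shifting a signed int into its sign bit is undefined behavior in C, while the unsigned shift is well defined. A stand-alone sketch (not part of the patch) of the well-defined form:

/*
 * For j == 31, (1 << j) shifts into the sign bit of a signed int,
 * which is undefined behavior; 1U << j is well defined.
 */
#include <stdio.h>

int main(void)
{
	unsigned int submap = 0x80000000u;	/* only bit 31 set */
	int j = 31;

	printf("bit %d set: %d\n", j, !!(submap & 1U << j));
	return 0;
}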

sysdrv/source/kernel/drivers/xen/gntdev-common.h

@@ -16,6 +16,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/types.h>
 #include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
 
 struct gntdev_dmabuf_priv;
@@ -43,9 +44,10 @@ struct gntdev_unmap_notify {
 };
 
 struct gntdev_grant_map {
+	atomic_t in_use;
 	struct mmu_interval_notifier notifier;
+	bool notifier_init;
 	struct list_head next;
-	struct vm_area_struct *vma;
 	int index;
 	int count;
 	int flags;
@@ -56,6 +58,7 @@ struct gntdev_grant_map {
 	struct gnttab_unmap_grant_ref *unmap_ops;
 	struct gnttab_map_grant_ref *kmap_ops;
 	struct gnttab_unmap_grant_ref *kunmap_ops;
+	bool *being_removed;
 	struct page **pages;
 	unsigned long pages_vm_start;
@@ -73,6 +76,11 @@ struct gntdev_grant_map {
 	/* Needed to avoid allocation in gnttab_dma_free_pages(). */
 	xen_pfn_t *frames;
 #endif
+
+	/* Number of live grants */
+	atomic_t live_grants;
+	/* Needed to avoid allocation in __unmap_grant_pages */
+	struct gntab_unmap_queue_data unmap_data;
 };
 
 struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,

sysdrv/source/kernel/drivers/xen/gntdev.c

@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
 MODULE_PARM_DESC(limit,
 	"Maximum number of grants that may be mapped by one mapping request");
 
+/* True in PV mode, false otherwise */
 static int use_ptemod;
 
-static int unmap_grant_pages(struct gntdev_grant_map *map,
-			     int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+			      int offset, int pages);
 
 static struct miscdevice gntdev_miscdev;
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
 	kvfree(map->unmap_ops);
 	kvfree(map->kmap_ops);
 	kvfree(map->kunmap_ops);
+	kvfree(map->being_removed);
 	kfree(map);
 }
@@ -140,12 +143,15 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	add->kunmap_ops = kvcalloc(count,
 				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
 	add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+	add->being_removed =
+		kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
 	if (NULL == add->grants ||
 	    NULL == add->map_ops ||
 	    NULL == add->unmap_ops ||
 	    NULL == add->kmap_ops ||
 	    NULL == add->kunmap_ops ||
-	    NULL == add->pages)
+	    NULL == add->pages ||
+	    NULL == add->being_removed)
 		goto err;
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@@ -240,9 +246,39 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	if (!refcount_dec_and_test(&map->users))
 		return;
 
-	if (map->pages && !use_ptemod)
+	if (map->pages && !use_ptemod) {
+		/*
+		 * Increment the reference count.  This ensures that the
+		 * subsequent call to unmap_grant_pages() will not wind up
+		 * re-entering itself.  It *can* wind up calling
+		 * gntdev_put_map() recursively, but such calls will be with a
+		 * reference count greater than 1, so they will return before
+		 * this code is reached.  The recursion depth is thus limited to
+		 * 1.  Do NOT use refcount_inc() here, as it will detect that
+		 * the reference count is zero and WARN().
+		 */
+		refcount_set(&map->users, 1);
+
+		/*
+		 * Unmap the grants.  This may or may not be asynchronous, so it
+		 * is possible that the reference count is 1 on return, but it
+		 * could also be greater than 1.
+		 */
 		unmap_grant_pages(map, 0, map->count);
+
+		/* Check if the memory now needs to be freed */
+		if (!refcount_dec_and_test(&map->users))
+			return;
+
+		/*
+		 * All pages have been returned to the hypervisor, so free the
+		 * map.
+		 */
+	}
+
+	if (use_ptemod && map->notifier_init)
+		mmu_interval_notifier_remove(&map->notifier);
 
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
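The long comment above documents a recursion-bounding trick: when the count hits zero, it is reset to 1 before calling into code that may re-enter the release path, so the recursion depth never exceeds one. A minimal user-space sketch of the same pattern, with a C11 atomic standing in for the kernel's refcount_t; release() and teardown() are hypothetical stand-ins for gntdev_put_map() and unmap_grant_pages():

#include <stdatomic.h>
#include <stdio.h>

struct object {
	atomic_int users;
};

static void release(struct object *obj);

static void teardown(struct object *obj)
{
	/* the (possibly asynchronous) path holds its own reference */
	atomic_fetch_add(&obj->users, 1);
	/* ... unmap work would happen here ... */
	release(obj);	/* re-entry: users >= 2, so this returns early */
}

static void release(struct object *obj)
{
	if (atomic_fetch_sub(&obj->users, 1) != 1)
		return;		/* other references remain */

	/*
	 * The count reached zero: take a temporary self-reference so a
	 * nested release() inside teardown() returns early instead of
	 * recursing into the free path; depth is therefore at most 1.
	 */
	atomic_store(&obj->users, 1);
	teardown(obj);

	if (atomic_fetch_sub(&obj->users, 1) != 1)
		return;		/* teardown kept the object alive */

	printf("object freed\n");	/* reached exactly once */
}

int main(void)
{
	struct object obj = { .users = 1 };

	release(&obj);
	return 0;
}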
@@ -255,21 +291,14 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 {
 	struct gntdev_grant_map *map = data;
-	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
-	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
+	unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
+	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
+		    (1 << _GNTMAP_guest_avail0);
 	u64 pte_maddr;
 
 	BUG_ON(pgnr >= map->count);
 	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 
-	/*
-	 * Set the PTE as special to force get_user_pages_fast() fall
-	 * back to the slow path.  If this is not supported as part of
-	 * the grant map, it will be done afterwards.
-	 */
-	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
-		flags |= (1 << _GNTMAP_guest_avail0);
-
 	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 			  map->grants[pgnr].ref,
 			  map->grants[pgnr].domid);
@@ -278,16 +307,9 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
-#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
-{
-	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
-	return 0;
-}
-#endif
-
 int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 {
+	size_t alloced = 0;
 	int i, err = 0;
 
 	if (!use_ptemod) {
@@ -336,87 +358,130 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 			map->pages, map->count);
 
 	for (i = 0; i < map->count; i++) {
-		if (map->map_ops[i].status == GNTST_okay)
+		if (map->map_ops[i].status == GNTST_okay) {
 			map->unmap_ops[i].handle = map->map_ops[i].handle;
-		else if (!err)
+			alloced++;
+		} else if (!err)
 			err = -EINVAL;
 
 		if (map->flags & GNTMAP_device_map)
 			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 
 		if (use_ptemod) {
-			if (map->kmap_ops[i].status == GNTST_okay)
+			if (map->kmap_ops[i].status == GNTST_okay) {
+				alloced++;
 				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-			else if (!err)
+			} else if (!err)
 				err = -EINVAL;
 		}
 	}
+	atomic_add(alloced, &map->live_grants);
 	return err;
 }
 
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+static void __unmap_grant_pages_done(int result,
+		struct gntab_unmap_queue_data *data)
+{
+	unsigned int i;
+	struct gntdev_grant_map *map = data->data;
+	unsigned int offset = data->unmap_ops - map->unmap_ops;
+	int successful_unmaps = 0;
+	int live_grants;
+
+	for (i = 0; i < data->count; i++) {
+		if (map->unmap_ops[offset + i].status == GNTST_okay &&
+		    map->unmap_ops[offset + i].handle != -1)
+			successful_unmaps++;
+
+		WARN_ON(map->unmap_ops[offset+i].status &&
+			map->unmap_ops[offset+i].handle != -1);
+		pr_debug("unmap handle=%d st=%d\n",
+			map->unmap_ops[offset+i].handle,
+			map->unmap_ops[offset+i].status);
+		map->unmap_ops[offset+i].handle = -1;
+		if (use_ptemod) {
+			if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+			    map->kunmap_ops[offset + i].handle != -1)
+				successful_unmaps++;
+
+			WARN_ON(map->kunmap_ops[offset+i].status &&
+				map->kunmap_ops[offset+i].handle != -1);
+			pr_debug("kunmap handle=%u st=%d\n",
+				 map->kunmap_ops[offset+i].handle,
+				 map->kunmap_ops[offset+i].status);
+			map->kunmap_ops[offset+i].handle = -1;
+		}
+	}
+	/*
+	 * Decrease the live-grant counter.  This must happen after the loop to
+	 * prevent premature reuse of the grants by gnttab_mmap().
+	 */
+	live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+	if (WARN_ON(live_grants < 0))
+		pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+		       __func__, live_grants, successful_unmaps);
+
+	/* Release reference taken by __unmap_grant_pages */
+	gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 		int pages)
 {
-	int i, err = 0;
-	struct gntab_unmap_queue_data unmap_data;
-
 	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 		int pgno = (map->notify.addr >> PAGE_SHIFT);
 		if (pgno >= offset && pgno < offset + pages) {
 			/* No need for kmap, pages are in lowmem */
 			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 		}
 	}
 
-	unmap_data.unmap_ops = map->unmap_ops + offset;
-	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
-	unmap_data.pages = map->pages + offset;
-	unmap_data.count = pages;
+	map->unmap_data.unmap_ops = map->unmap_ops + offset;
+	map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+	map->unmap_data.pages = map->pages + offset;
+	map->unmap_data.count = pages;
+	map->unmap_data.done = __unmap_grant_pages_done;
+	map->unmap_data.data = map;
+	refcount_inc(&map->users); /* to keep map alive during async call below */
 
-	err = gnttab_unmap_refs_sync(&unmap_data);
-	if (err)
-		return err;
-
-	for (i = 0; i < pages; i++) {
-		if (map->unmap_ops[offset+i].status)
-			err = -EINVAL;
-		pr_debug("unmap handle=%d st=%d\n",
-			map->unmap_ops[offset+i].handle,
-			map->unmap_ops[offset+i].status);
-		map->unmap_ops[offset+i].handle = -1;
-	}
-	return err;
+	gnttab_unmap_refs_async(&map->unmap_data);
 }
 
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
-			     int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+			      int pages)
 {
-	int range, err = 0;
+	int range;
+
+	if (atomic_read(&map->live_grants) == 0)
+		return; /* Nothing to do */
 
 	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 
 	/* It is possible the requested range will have a "hole" where we
 	 * already unmapped some of the grants. Only unmap valid ranges.
 	 */
-	while (pages && !err) {
-		while (pages && map->unmap_ops[offset].handle == -1) {
+	while (pages) {
+		while (pages && map->being_removed[offset]) {
 			offset++;
 			pages--;
 		}
 		range = 0;
 		while (range < pages) {
-			if (map->unmap_ops[offset+range].handle == -1)
+			if (map->being_removed[offset + range])
 				break;
+			map->being_removed[offset + range] = true;
 			range++;
 		}
-		err = __unmap_grant_pages(map, offset, range);
+		if (range)
+			__unmap_grant_pages(map, offset, range);
 		offset += range;
 		pages -= range;
 	}
-
-	return err;
 }
 
 /* ------------------------------------------------------------------ */
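The loop above walks the requested range, skipping entries already claimed and handing each maximal contiguous run to __unmap_grant_pages(). A self-contained sketch of that hole-skipping scan, with a plain bool array standing in for map->being_removed:

#include <stdbool.h>
#include <stdio.h>

static void process(int offset, int len)
{
	printf("unmap [%d, %d)\n", offset, offset + len);
}

static void unmap_range(bool *being_removed, int offset, int pages)
{
	int range;

	while (pages) {
		/* skip the "hole": entries already being removed */
		while (pages && being_removed[offset]) {
			offset++;
			pages--;
		}
		/* claim the next contiguous run of live entries */
		range = 0;
		while (range < pages && !being_removed[offset + range]) {
			being_removed[offset + range] = true;
			range++;
		}
		if (range)
			process(offset, range);
		offset += range;
		pages -= range;
	}
}

int main(void)
{
	bool being_removed[8] = {
		false, true, true, false, false, false, true, false
	};

	/* prints: unmap [0, 1), unmap [3, 6), unmap [7, 8) */
	unmap_range(being_removed, 0, 8);
	return 0;
}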
@@ -436,11 +501,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	struct gntdev_priv *priv = file->private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
-	if (use_ptemod) {
-		WARN_ON(map->vma != vma);
-		mmu_interval_notifier_remove(&map->notifier);
-		map->vma = NULL;
-	}
+
 	vma->vm_private_data = NULL;
 	gntdev_put_map(priv, map);
 }
@@ -468,31 +529,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 	struct gntdev_grant_map *map =
 		container_of(mn, struct gntdev_grant_map, notifier);
 	unsigned long mstart, mend;
-	int err;
+	unsigned long map_start, map_end;
 
 	if (!mmu_notifier_range_blockable(range))
 		return false;
 
+	map_start = map->pages_vm_start;
+	map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
 	/*
 	 * If the VMA is split or otherwise changed the notifier is not
 	 * updated, but we don't want to process VA's outside the modified
 	 * VMA. FIXME: It would be much more understandable to just prevent
 	 * modifying the VMA in the first place.
 	 */
-	if (map->vma->vm_start >= range->end ||
-	    map->vma->vm_end <= range->start)
+	if (map_start >= range->end || map_end <= range->start)
 		return true;
 
-	mstart = max(range->start, map->vma->vm_start);
-	mend = min(range->end, map->vma->vm_end);
+	mstart = max(range->start, map_start);
+	mend = min(range->end, map_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-			map->index, map->count,
-			map->vma->vm_start, map->vma->vm_end,
-			range->start, range->end, mstart, mend);
-	err = unmap_grant_pages(map,
-			(mstart - map->vma->vm_start) >> PAGE_SHIFT,
-			(mend - mstart) >> PAGE_SHIFT);
-	WARN_ON(err);
+		 map->index, map->count, map_start, map_end,
+		 range->start, range->end, mstart, mend);
+	unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+			  (mend - mstart) >> PAGE_SHIFT);
 
 	return true;
 }
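gntdev_invalidate() now clamps the notifier range against the mapping's own bounds before converting to page offsets, instead of trusting a possibly split VMA. A stand-alone sketch of the clamping arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

static void clamp_and_unmap(unsigned long map_start, unsigned long map_end,
			    unsigned long range_start, unsigned long range_end)
{
	unsigned long mstart, mend;

	if (map_start >= range_end || map_end <= range_start)
		return;	/* no overlap: nothing to invalidate */

	mstart = range_start > map_start ? range_start : map_start;
	mend = range_end < map_end ? range_end : map_end;

	printf("unmap page offset %lu, %lu page(s)\n",
	       (mstart - map_start) >> PAGE_SHIFT,
	       (mend - mstart) >> PAGE_SHIFT);
}

int main(void)
{
	/* a 16-page map at 0x100000, invalidating 0x102000..0x105000 */
	clamp_and_unmap(0x100000, 0x110000, 0x102000, 0x105000);
	/* prints: unmap page offset 2, 3 page(s) */
	return 0;
}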
@@ -972,14 +1032,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
-			index, count, vma->vm_start, vma->vm_pgoff);
+		 index, count, vma->vm_start, vma->vm_pgoff);
 
 	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, index, count);
 	if (!map)
 		goto unlock_out;
-	if (use_ptemod && map->vma)
+	if (!atomic_add_unless(&map->in_use, 1, 1))
 		goto unlock_out;
+
 	refcount_inc(&map->users);
 
 	vma->vm_ops = &gntdev_vmops;
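The atomic_add_unless(&map->in_use, 1, 1) gate above lets exactly one mmap() claim a map: the increment succeeds only while the counter is not already 1. A user-space sketch of the same claim-once semantics using a C11 compare-exchange:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool claim(atomic_int *in_use)
{
	int expected = 0;

	/* succeeds only for the first caller: 0 -> 1 */
	return atomic_compare_exchange_strong(in_use, &expected, 1);
}

int main(void)
{
	atomic_int in_use = 0;

	printf("first claim:  %d\n", claim(&in_use));	/* 1 */
	printf("second claim: %d\n", claim(&in_use));	/* 0 */
	return 0;
}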
@@ -1000,15 +1061,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 			map->flags |= GNTMAP_readonly;
 	}
 
+	map->pages_vm_start = vma->vm_start;
+
 	if (use_ptemod) {
-		map->vma = vma;
 		err = mmu_interval_notifier_insert_locked(
 			&map->notifier, vma->vm_mm, vma->vm_start,
 			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
-		if (err) {
-			map->vma = NULL;
+		if (err)
 			goto out_unlock_put;
-		}
+
+		map->notifier_init = true;
 	}
 	mutex_unlock(&priv->lock);
@@ -1025,7 +1087,6 @@
 	 */
 	mmu_interval_read_begin(&map->notifier);
 
-	map->pages_vm_start = vma->vm_start;
 	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 				  vma->vm_end - vma->vm_start,
 				  find_grant_ptes, map);
@@ -1043,23 +1104,6 @@
 		err = vm_map_pages_zero(vma, map->pages, map->count);
 		if (err)
 			goto out_put_map;
-	} else {
-#ifdef CONFIG_X86
-		/*
-		 * If the PTEs were not made special by the grant map
-		 * hypercall, do so here.
-		 *
-		 * This is racy since the mapping is already visible
-		 * to userspace but userspace should be well-behaved
-		 * enough to not touch it until the mmap() call
-		 * returns.
-		 */
-		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
-			apply_to_page_range(vma->vm_mm, vma->vm_start,
-					vma->vm_end - vma->vm_start,
-					set_grant_ptes_as_special, NULL);
-		}
-#endif
 	}
 
 	return 0;
@@ -1071,13 +1115,8 @@ unlock_out:
 out_unlock_put:
 	mutex_unlock(&priv->lock);
 out_put_map:
-	if (use_ptemod) {
+	if (use_ptemod)
 		unmap_grant_pages(map, 0, map->count);
-		if (map->vma) {
-			mmu_interval_notifier_remove(&map->notifier);
-			map->vma = NULL;
-		}
-	}
 	gntdev_put_map(priv, map);
 	return err;
 }

sysdrv/source/kernel/drivers/xen/grant-table.c

@@ -981,6 +981,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 	size_t size;
 	int i, ret;
 
+	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+		return -ENOMEM;
+
 	size = args->nr_pages << PAGE_SHIFT;
 	if (args->coherent)
 		args->vaddr = dma_alloc_coherent(args->dev, size,
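The new guard rejects nr_pages values for which nr_pages << PAGE_SHIFT would overflow an int. A stand-alone sketch of the bound, assuming PAGE_SHIFT is 12:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static long alloc_size(int nr_pages)
{
	if (nr_pages < 0 || nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -1;	/* would overflow: reject */
	return (long)nr_pages << PAGE_SHIFT;
}

int main(void)
{
	printf("%ld\n", alloc_size(16));	/* 65536 */
	printf("%ld\n", alloc_size(INT_MAX));	/* -1 */
	return 0;
}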

sysdrv/source/kernel/drivers/xen/pcpu.c

@@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
 	err = device_register(dev);
 	if (err) {
-		pcpu_release(dev);
+		put_device(dev);
 		return err;
 	}
 

sysdrv/source/kernel/drivers/xen/platform-pci.c

@@ -137,7 +137,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
 		if (ret) {
 			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
 				 "err=%d\n", ret);
-			goto out;
+			goto irq_out;
 		}
 	}
@@ -145,13 +145,16 @@
 	grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
 	ret = gnttab_setup_auto_xlat_frames(grant_frames);
 	if (ret)
-		goto out;
+		goto irq_out;
 	ret = gnttab_init();
 	if (ret)
 		goto grant_out;
 	return 0;
 grant_out:
 	gnttab_free_auto_xlat_frames();
+irq_out:
+	if (!xen_have_vector_callback)
+		free_irq(pdev->irq, pdev);
 out:
 	pci_release_region(pdev, 0);
 mem_out:

sysdrv/source/kernel/drivers/xen/privcmd.c

@@ -581,27 +581,30 @@ static int lock_pages(
 	struct privcmd_dm_op_buf kbufs[], unsigned int num,
 	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
 {
-	unsigned int i;
+	unsigned int i, off = 0;
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num; ) {
 		unsigned int requested;
 		int page_count;
 
 		requested = DIV_ROUND_UP(
 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
-			PAGE_SIZE);
+			PAGE_SIZE) - off;
 		if (requested > nr_pages)
 			return -ENOSPC;
 
 		page_count = pin_user_pages_fast(
-			(unsigned long) kbufs[i].uptr,
+			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
 			requested, FOLL_WRITE, pages);
-		if (page_count < 0)
-			return page_count;
+		if (page_count <= 0)
+			return page_count ? : -EFAULT;
 
 		*pinned += page_count;
 		nr_pages -= page_count;
 		pages += page_count;
+
+		off = (requested == page_count) ? 0 : off + page_count;
+		i += !off;
 	}
 
 	return 0;
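The reworked lock_pages() no longer assumes pin_user_pages_fast() pins everything it was asked for: after a short pin it stays on the same buffer and resumes at page offset off. A sketch of that resume logic, with a hypothetical fake_pin() stand-in that pins at most two pages per call:

#include <stdio.h>

static int fake_pin(int requested)
{
	return requested < 2 ? requested : 2;
}

int main(void)
{
	int buf_pages[] = { 5, 1 };	/* pages needed per buffer */
	int num = 2, i, off = 0, pinned = 0;

	for (i = 0; i < num; ) {
		int requested = buf_pages[i] - off;
		int count = fake_pin(requested);

		pinned += count;
		printf("buffer %d: pinned %d page(s) at offset %d\n",
		       i, count, off);

		/* advance within this buffer, or move to the next one */
		off = (requested == count) ? 0 : off + count;
		i += !off;
	}
	printf("total pinned: %d\n", pinned);	/* 6 */
	return 0;
}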
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
 	}
 
 	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
-	if (rc < 0) {
-		nr_pages = pinned;
+	if (rc < 0)
 		goto out;
-	}
 
 	for (i = 0; i < kdata.num; i++) {
 		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@
 	xen_preemptible_hcall_end();
 
 out:
-	unlock_pages(pages, nr_pages);
+	unlock_pages(pages, pinned);
 	kfree(xbufs);
 	kfree(pages);
 	kfree(kbufs);

sysdrv/source/kernel/drivers/xen/xen-pciback/conf_space_capability.c

@@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = {
 };
 
 static struct msi_msix_field_config {
-	u16          enable_bit; /* bit for enabling MSI/MSI-X */
-	unsigned int int_type;   /* interrupt type for exclusiveness check */
+	u16          enable_bit;   /* bit for enabling MSI/MSI-X */
+	u16          allowed_bits; /* bits allowed to be changed */
+	unsigned int int_type;     /* interrupt type for exclusiveness check */
 } msi_field_config = {
 	.enable_bit	= PCI_MSI_FLAGS_ENABLE,
+	.allowed_bits	= PCI_MSI_FLAGS_ENABLE,
 	.int_type	= INTERRUPT_TYPE_MSI,
 }, msix_field_config = {
 	.enable_bit	= PCI_MSIX_FLAGS_ENABLE,
+	.allowed_bits	= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL,
 	.int_type	= INTERRUPT_TYPE_MSIX,
 };
@@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
 		return 0;
 
 	if (!dev_data->allow_interrupt_control ||
-	    (new_value ^ old_value) & ~field_config->enable_bit)
+	    (new_value ^ old_value) & ~field_config->allowed_bits)
 		return PCIBIOS_SET_FAILED;
 
 	if (new_value & field_config->enable_bit) {
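Splitting enable_bit from allowed_bits lets a guest toggle PCI_MSIX_FLAGS_MASKALL as well as the enable bit, while writes that flip anything else are still rejected by the (new ^ old) & ~allowed_bits test. A sketch of that filter; the two flag values below match the real PCI_MSIX_FLAGS_ENABLE and PCI_MSIX_FLAGS_MASKALL:

#include <stdio.h>

#define MSIX_ENABLE_BIT		0x8000
#define MSIX_MASKALL_BIT	0x4000

static int write_ok(unsigned short old_value, unsigned short new_value,
		    unsigned short allowed_bits)
{
	/* reject any write that would flip a bit outside the allowed set */
	return !((new_value ^ old_value) & ~allowed_bits);
}

int main(void)
{
	unsigned short allowed = MSIX_ENABLE_BIT | MSIX_MASKALL_BIT;

	/* toggling MASKALL is now permitted */
	printf("%d\n", write_ok(0, MSIX_MASKALL_BIT, allowed));	/* 1 */
	/* flipping any other flag bit is still refused */
	printf("%d\n", write_ok(0, 0x0001, allowed));		/* 0 */
	return 0;
}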

sysdrv/source/kernel/drivers/xen/xenbus/xenbus_dev_frontend.c

@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
 {
 	struct xenbus_file_priv *u = filp->private_data;
 	struct read_buffer *rb;
-	unsigned i;
+	ssize_t i;
 	int ret;
 
 	mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
 	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
 	i = 0;
 	while (i < len) {
-		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
 
 		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
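The switch from unsigned to ssize_t/size_t avoids truncating a large read length: casting a 64-bit len to unsigned discards the upper bits before the subtraction, while doing the arithmetic in size_t (as min_t(size_t, ...) does) keeps it exact. A small demonstration, assuming an LP64 system:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t len = 0x140000000;	/* a 5 GiB read request */
	size_t i = 0;

	unsigned truncated = (unsigned)len - (unsigned)i;
	size_t exact = len - i;

	printf("truncated: %u\n", truncated);	/* 1073741824 (1 GiB) */
	printf("exact:     %zu\n", exact);	/* 5368709120 (5 GiB) */
	return 0;
}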

sysdrv/source/kernel/drivers/xen/xlate_mmu.c

@@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
-
 struct remap_pfn {
 	struct mm_struct *mm;