project:build.sh: Added fastboot support; custom modifications to U-Boot and the kernel are now applied as patches.
project:cfg:BoardConfig_IPC: Added a fastboot BoardConfig file and firmware post-scripts; split the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to work with the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W on Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated the optional Buildroot mirror sources, bumped some software versions, and moved the U-Boot and kernel device tree and configuration files that undergo repeated modification into separate storage.
sysdrv:source:mcu: Uses the RISC-V MCU SDK with the RT-Thread system, mainly to initialize the camera AE during quick boot.
sysdrv:source:uboot: Added fastboot support; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to 5.10.160; raised the NPU frequency for RV1106G3; added fastboot support.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
luckfox-eng29
2024-08-21 10:05:47 +08:00
parent e79fd21975
commit 8f34c2760d
20902 changed files with 6567362 additions and 11248383 deletions


@@ -10,8 +10,8 @@ config CRYPTO_DEV_ROCKCHIP_V2
 	default y if CPU_RV1126 || CPU_RK1808 || CPU_RK3308 || CPU_PX30 || CPU_RK3568 || CPU_RK3588
 
 config CRYPTO_DEV_ROCKCHIP_V3
-	bool "crypto v3/v4 for RV1106/RK3528"
-	default y if CPU_RV1106 || CPU_RK3528
+	bool "crypto v3/v4 for RV1106/RK3528/RK3562"
+	default y if CPU_RV1106 || CPU_RK3528 || CPU_RK3562
 
 endif


@@ -131,6 +131,8 @@ crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
 		return -EINVAL;
 	}
 
+	memset(&keys, 0x00, sizeof(keys));
+
 	switch (sop->cipher) {
 	case 0:
 		break;
@@ -322,8 +324,9 @@ crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
 		goto session_error;
 	}
 
-	/* Non-multithreaded can only create one session */
-	if (!rk_cryptodev_multi_thread(NULL) &&
+	/* Non-multithreaded can only create one session for hash */
+	if (ses_new->hdata.init &&
+	    !rk_cryptodev_multi_thread(NULL) &&
 	    !atomic_dec_and_test(&cryptodev_sess)) {
 		atomic_inc(&cryptodev_sess);
 		ddebug(2, "Non-multithreaded can only create one session. sess = %d",
@@ -377,6 +380,14 @@ crypto_destroy_session(struct csession *ses_ptr)
 		mutex_lock(&ses_ptr->sem);
 	}
 	ddebug(2, "Removed session 0x%08X", ses_ptr->sid);
+
+	/* Non-multithreaded can only create one session for hash */
+	if (ses_ptr->hdata.init &&
+	    !rk_cryptodev_multi_thread(NULL)) {
+		atomic_inc(&cryptodev_sess);
+		ddebug(2, "Release cryptodev_sess = %d", atomic_read(&cryptodev_sess));
+	}
+
 	cryptodev_cipher_deinit(&ses_ptr->cdata);
 	cryptodev_hash_deinit(&ses_ptr->hdata);
 	ddebug(2, "freeing space for %d user pages", ses_ptr->array_size);
@@ -385,12 +396,6 @@ crypto_destroy_session(struct csession *ses_ptr)
 	mutex_unlock(&ses_ptr->sem);
 	mutex_destroy(&ses_ptr->sem);
 	kfree(ses_ptr);
-
-	/* Non-multithreaded can only create one session */
-	if (!rk_cryptodev_multi_thread(NULL)) {
-		atomic_inc(&cryptodev_sess);
-		ddebug(2, "Release cryptodev_sess = %d", atomic_read(&cryptodev_sess));
-	}
 }
 
 /* Look up a session by ID and remove. */
@@ -565,8 +570,6 @@ static void cryptask_routine(struct work_struct *work)
 }
 
 /* ====== /dev/crypto ====== */
-static atomic_t cryptodev_node = ATOMIC_INIT(1);
-
 static int
 cryptodev_open(struct inode *inode, struct file *filp)
 {
@@ -574,18 +577,6 @@ cryptodev_open(struct inode *inode, struct file *filp)
 	struct crypt_priv *pcr;
 	int i;
 
-	/* Non-multithreaded can only be opened once */
-	if (!rk_cryptodev_multi_thread(NULL) &&
-	    !atomic_dec_and_test(&cryptodev_node)) {
-		atomic_inc(&cryptodev_node);
-		ddebug(2, "Non-multithreaded can only be opened once. node = %d",
-		       atomic_read(&cryptodev_node));
-		return -EBUSY;
-	}
-
-	/* make sure sess == 1 after open */
-	atomic_set(&cryptodev_sess, 1);
-
 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
 	if (!pcr)
 		return -ENOMEM;
@@ -644,12 +635,6 @@ cryptodev_release(struct inode *inode, struct file *filp)
 	if (!pcr)
 		return 0;
 
-	/* Non-multithreaded can only be opened once */
-	if (!rk_cryptodev_multi_thread(NULL)) {
-		atomic_inc(&cryptodev_node);
-		ddebug(2, "Release cryptodev_node = %d", atomic_read(&cryptodev_node));
-	}
-
 	cancel_work_sync(&pcr->cryptask);
 	list_splice_tail(&pcr->todo.list, &pcr->free.list);
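The hunks above rework a single-slot gating idiom: atomic_dec_and_test() claims the one available token, atomic_inc() returns it on the failure and release paths, and the commit narrows the gate to hash sessions (ses_new->hdata.init) while pairing the release with session teardown instead of device release. A minimal userspace sketch of the same pattern, using C11 atomics rather than the kernel's atomic_t (names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdio.h>

/* One-slot gate, mirroring "atomic_t cryptodev_sess = ATOMIC_INIT(1)". */
static atomic_int sess_tokens = 1;

/* Claim the single session slot; returns 0 on success, -1 if busy. */
static int session_claim(void)
{
	/* fetch_sub returns the old value: 1 means we took the last token. */
	if (atomic_fetch_sub(&sess_tokens, 1) != 1) {
		atomic_fetch_add(&sess_tokens, 1);	/* undo, like atomic_inc() */
		return -1;
	}
	return 0;
}

static void session_release(void)
{
	atomic_fetch_add(&sess_tokens, 1);
}

int main(void)
{
	printf("first claim:   %d\n", session_claim());	/* 0: got the slot */
	printf("second claim:  %d\n", session_claim());	/* -1: busy */
	session_release();
	printf("after release: %d\n", session_claim());	/* 0 again */
	return 0;
}

The old code incremented the counter unconditionally in crypto_destroy_session(), so a non-hash session could release a token it never claimed; moving the release under the same ses_ptr->hdata.init condition keeps claim and release balanced.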


@@ -636,7 +636,7 @@ static int crypto_rsa_run(struct fcrypt *fcr, struct kernel_crypt_rsa_op *krop)
 	const char *driver = "rsa-rk";
 	struct crypto_akcipher *tfm = NULL;
 	struct akcipher_request *req = NULL;
-	struct crypto_wait wait;
+	DECLARE_CRYPTO_WAIT(wait);
 	struct scatterlist src, dst;
 	bool is_priv_key = (rop->flags & COP_FLAG_RSA_PRIV) == COP_FLAG_RSA_PRIV;
@@ -708,6 +708,9 @@ static int crypto_rsa_run(struct fcrypt *fcr, struct kernel_crypt_rsa_op *krop)
 	crypto_init_wait(&wait);
 	akcipher_request_set_crypt(req, &src, &dst, rop->in_len, out_len_max);
+	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      crypto_req_done, &wait);
+
 	switch (rop->op) {
 	case AOP_ENCRYPT:
 		ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
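Two related fixes here: DECLARE_CRYPTO_WAIT(wait) yields a struct crypto_wait that is fully initialized at its declaration, and the second hunk is the functional one — without akcipher_request_set_callback() wiring crypto_req_done to the wait object, crypto_wait_req() would never be woken when the engine completes asynchronously. A rough userspace analogue of the declare-initialized completion idiom, built on pthreads (an illustration of the idiom, not the kernel implementation):

#include <pthread.h>
#include <stdio.h>

/* A completion-style wait object, analogous to struct crypto_wait. */
struct wait_obj {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int err;
};

/* Declare *and* initialize in one statement, like DECLARE_CRYPTO_WAIT. */
#define DECLARE_WAIT_OBJ(w) \
	struct wait_obj w = { PTHREAD_MUTEX_INITIALIZER, \
			      PTHREAD_COND_INITIALIZER, 0, 0 }

/* Completion callback: the role crypto_req_done plays for crypto_wait. */
static void wait_obj_complete(struct wait_obj *w, int err)
{
	pthread_mutex_lock(&w->lock);
	w->done = 1;
	w->err = err;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

static int wait_obj_wait(struct wait_obj *w)
{
	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return w->err;
}

int main(void)
{
	DECLARE_WAIT_OBJ(wait);		/* usable immediately, no init window */

	wait_obj_complete(&wait, 0);	/* e.g. a synchronous completion */
	printf("err = %d\n", wait_obj_wait(&wait));
	return 0;
}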


@@ -90,7 +90,7 @@ int __cryptodev_get_userbuf(uint8_t __user *addr, uint32_t len, int write,
 #else
 	mmap_read_unlock(mm);
 #endif
 
-	if (ret != pgcount)
+	if (ret < 0 || ret != pgcount)
 		return -EINVAL;
 
 	sg_init_table(sg, pgcount);
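get_user_pages()-style APIs return either a negative errno or the number of pages actually pinned, so both failure shapes need handling before the result is used as a page count. A small self-contained sketch of the hardened check (the pin function is a mock, invented for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Mock of a pin-pages call: returns pages pinned, or -errno. */
static int pin_pages_mock(int want, int fail)
{
	if (fail)
		return -EFAULT;		/* hard failure */
	return want - 1;		/* short pin: fewer than requested */
}

static int get_userbuf(uint32_t pgcount, int fail)
{
	int ret = pin_pages_mock(pgcount, fail);

	/*
	 * "ret != pgcount" alone compares a possibly negative int against
	 * an unsigned count; checking ret < 0 first keeps the error path
	 * explicit and safe.
	 */
	if (ret < 0 || (uint32_t)ret != pgcount)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("short pin -> %d\n", get_userbuf(4, 0));	/* -EINVAL (-22) */
	printf("hard fail -> %d\n", get_userbuf(4, 1));	/* -EINVAL (-22) */
	return 0;
}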


@@ -286,7 +286,6 @@ int rk_ahash_start(struct rk_crypto_dev *rk_dev)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct rk_crypto_algt *algt = rk_ahash_get_algt(tfm);
 	struct scatterlist *src_sg;
-	unsigned long flags;
 	unsigned int nbytes;
 	int ret = 0;
@@ -378,14 +377,12 @@ int rk_ahash_start(struct rk_crypto_dev *rk_dev)
 		     ctx->hash_tmp_len, ctx->lastc_len, nbytes);
 
 	if (nbytes) {
-		spin_lock_irqsave(&rk_dev->lock, flags);
 		if (ctx->calc_cnt == 0)
 			alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->type);
 
+		/* flush all 64byte key buffer for hmac */
+		alg_ctx->ops.hw_write_key(ctx->rk_dev, ctx->authkey, sizeof(ctx->authkey));
+
 		ret = rk_ahash_set_data_start(rk_dev, rctx->flag);
-		spin_unlock_irqrestore(&rk_dev->lock, flags);
 	}
 
 exit:
 	return ret;
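Writing the full, fixed-size key buffer (sizeof(ctx->authkey), not just the key length) ensures no stale key material from a previous session survives in the engine's key registers. A minimal sketch of the zero-pad-then-write-everything idea (buffer size and names are illustrative):

#include <stdio.h>
#include <string.h>

#define HMAC_KEY_BUF 64		/* e.g. SHA-1/SHA-256 block size */

/* Stand-in for the engine's key registers. */
static unsigned char hw_key[HMAC_KEY_BUF];

static void hw_write_key(const unsigned char *key, size_t len)
{
	memcpy(hw_key, key, len);
}

int main(void)
{
	unsigned char authkey[HMAC_KEY_BUF];

	/* Session 1: a long key fills the whole buffer. */
	memset(authkey, 0xAA, sizeof(authkey));
	hw_write_key(authkey, sizeof(authkey));

	/* Session 2: a short key; zero-pad, then flush ALL 64 bytes. */
	memset(authkey, 0, sizeof(authkey));
	memcpy(authkey, "short-key", 9);
	hw_write_key(authkey, sizeof(authkey));	/* not just 9 bytes */

	/* Byte 9 onward is now 0, not a stale 0xAA from session 1. */
	printf("hw_key[20] = 0x%02x\n", hw_key[20]);
	return 0;
}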


@@ -272,27 +272,46 @@ static void start_irq_timer(struct rk_crypto_dev *rk_dev)
 static void rk_crypto_irq_timer_handle(struct timer_list *t)
 {
 	struct rk_crypto_dev *rk_dev = from_timer(rk_dev, t, timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
 
 	rk_dev->err = -ETIMEDOUT;
 	rk_dev->stat.timeout_cnt++;
+	rk_unload_data(rk_dev);
+
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	tasklet_schedule(&rk_dev->done_task);
 }
 
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
 	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
 
-	spin_lock(&rk_dev->lock);
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	/* reset timeout timer */
+	start_irq_timer(rk_dev);
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 	rk_dev->stat.irq_cnt++;
 
 	if (alg_ctx->ops.irq_handle)
 		alg_ctx->ops.irq_handle(irq, dev_id);
 
-	tasklet_schedule(&rk_dev->done_task);
+	/* already trigger timeout */
+	if (rk_dev->err != -ETIMEDOUT) {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		tasklet_schedule(&rk_dev->done_task);
+	} else {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+	}
 
-	spin_unlock(&rk_dev->lock);
 	return IRQ_HANDLED;
 }
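The race being closed: the timeout timer and the IRQ can each try to complete the same request. Both paths now run under the irqsave lock, and the IRQ only schedules the done tasklet if the timer has not already marked the request -ETIMEDOUT. A compressed pthread model of that handshake (a mutex stands in for the spinlock; all names are invented for the sketch):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_err;
static int done_scheduled;

/* Timer path: flag the timeout and schedule completion, under the lock. */
static void timeout_handler(void)
{
	pthread_mutex_lock(&lock);
	dev_err = -ETIMEDOUT;
	done_scheduled++;
	pthread_mutex_unlock(&lock);
}

/* IRQ path: complete only if the timer has not already claimed it. */
static void irq_handler(void)
{
	pthread_mutex_lock(&lock);
	if (dev_err != -ETIMEDOUT)
		done_scheduled++;	/* tasklet_schedule() equivalent */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timeout_handler();	/* timer fires first... */
	irq_handler();		/* ...then the late IRQ arrives */
	printf("completions scheduled: %d\n", done_scheduled);	/* 1, not 2 */
	return 0;
}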
@@ -390,23 +409,22 @@ static void rk_crypto_queue_task_cb(unsigned long data)
 	struct crypto_async_request *async_req, *backlog;
 	unsigned long flags;
 
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
 	if (rk_dev->async_req) {
 		dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
-		return;
+		goto exit;
 	}
 
 	rk_dev->err = 0;
-	spin_lock_irqsave(&rk_dev->lock, flags);
 	backlog = crypto_get_backlog(&rk_dev->queue);
 	async_req = crypto_dequeue_request(&rk_dev->queue);
 
 	if (!async_req) {
 		rk_dev->busy = false;
-		spin_unlock_irqrestore(&rk_dev->lock, flags);
-		return;
+		goto exit;
 	}
 
 	rk_dev->stat.dequeue_cnt++;
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	if (backlog) {
 		backlog->complete(backlog, -EINPROGRESS);
@@ -417,12 +435,26 @@ static void rk_crypto_queue_task_cb(unsigned long data)
 	rk_dev->err = rk_start_op(rk_dev);
 	if (rk_dev->err)
 		rk_complete_op(rk_dev, rk_dev->err);
+
+exit:
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static void rk_crypto_done_task_cb(unsigned long data)
 {
 	struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	if (!rk_dev->async_req) {
+		dev_err(rk_dev->dev, "done task receive invalid async_req\n");
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		return;
+	}
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 	rk_dev->stat.done_cnt++;
@@ -440,9 +472,12 @@ static void rk_crypto_done_task_cb(unsigned long data)
 	if (rk_dev->err)
 		goto exit;
 
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 	return;
+
 exit:
 	rk_complete_op(rk_dev, rk_dev->err);
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
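With the lock now held across the whole callback, every early return has to become goto exit so there is exactly one unlock point — note the old `return;` in rk_crypto_queue_task_cb() would have leaked the lock once it was hoisted to the top. That shape, reduced to a standalone sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int process(int busy, int empty)
{
	int err = 0;

	pthread_mutex_lock(&lock);

	if (busy) {		/* was: return; -- would leak the lock */
		err = -1;
		goto exit;
	}
	if (empty) {		/* was: unlock + return; now one exit path */
		err = -2;
		goto exit;
	}

	/* ... do the real work under the lock ... */

exit:
	pthread_mutex_unlock(&lock);	/* the only unlock in the function */
	return err;
}

int main(void)
{
	printf("%d %d %d\n", process(1, 0), process(0, 1), process(0, 0));
	return 0;
}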


@@ -331,7 +331,6 @@ int rk_ablk_start(struct rk_crypto_dev *rk_dev)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);
 	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
-	unsigned long flags;
 	int err = 0;
 
 	alg_ctx->left_bytes = req->cryptlen;
@@ -345,10 +344,9 @@ int rk_ablk_start(struct rk_crypto_dev *rk_dev)
 	CRYPTO_TRACE("total = %u", alg_ctx->total);
 
-	spin_lock_irqsave(&rk_dev->lock, flags);
 	alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->mode);
 	err = rk_set_data_start(rk_dev);
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	return err;
 }
@@ -443,7 +441,6 @@ int rk_aead_start(struct rk_crypto_dev *rk_dev)
 	struct rk_crypto_algt *algt = rk_aead_get_algt(tfm);
 	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
 	unsigned int total = 0, authsize;
-	unsigned long flags;
 	int err = 0;
 
 	total = req->cryptlen + req->assoclen;
@@ -464,10 +461,9 @@ int rk_aead_start(struct rk_crypto_dev *rk_dev)
 	CRYPTO_TRACE("is_enc = %d, authsize = %u, cryptlen = %u, total = %u, assoclen = %u",
 		     ctx->is_enc, authsize, req->cryptlen, alg_ctx->total, alg_ctx->assoclen);
 
-	spin_lock_irqsave(&rk_dev->lock, flags);
 	alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->mode);
 	err = rk_set_data_start(rk_dev);
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	return err;
 }


@@ -72,14 +72,18 @@ static int check_scatter_align(struct scatterlist *sg_src,
 {
 	int in, out, align;
 
+	/* The last piece has no need for length alignment */
 	in = IS_ALIGNED((u32)sg_src->offset, 4) &&
-	     IS_ALIGNED((u32)sg_src->length, align_mask) &&
+	     (!sg_next(sg_src) ||
+	      IS_ALIGNED((u32)sg_src->length, align_mask)) &&
 	     (sg_phys(sg_src) < SZ_4G);
 
 	if (!sg_dst)
 		return in;
 
+	/* The last piece has no need for length alignment */
 	out = IS_ALIGNED((u32)sg_dst->offset, 4) &&
-	      IS_ALIGNED((u32)sg_dst->length, align_mask) &&
+	      (!sg_next(sg_dst) ||
+	       IS_ALIGNED((u32)sg_dst->length, align_mask)) &&
 	      (sg_phys(sg_dst) < SZ_4G);
 
 	align = in && out;
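The relaxed rule: every segment must still be 4-byte aligned at its offset, but only non-final segments need length alignment, since the engine can handle an arbitrary tail. A self-contained model of the check (the list node and constants are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

struct seg {
	unsigned int offset;
	unsigned int length;
	struct seg *next;	/* sg_next() analogue */
};

static bool seg_ok(const struct seg *s, unsigned int align_mask)
{
	/* The last piece has no need for length alignment. */
	return IS_ALIGNED(s->offset, 4) &&
	       (!s->next || IS_ALIGNED(s->length, align_mask));
}

int main(void)
{
	struct seg tail = { .offset = 0, .length = 13, .next = NULL };
	struct seg head = { .offset = 4, .length = 64, .next = &tail };

	printf("head ok: %d\n", seg_ok(&head, 16));	/* 1: aligned length */
	printf("tail ok: %d\n", seg_ok(&tail, 16));	/* 1: last piece exempt */

	tail.next = &head;	/* pretend the 13-byte piece is not last */
	printf("mid  ok: %d\n", seg_ok(&tail, 16));	/* 0: needs alignment */
	return 0;
}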


@@ -50,15 +50,33 @@ static int zero_message_process(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	int rk_digest_size = crypto_ahash_digestsize(tfm);
+	const u8 sha256_zero_msg_hash[SHA256_DIGEST_SIZE] = {
+		0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+		0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+		0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+		0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+	};
+	const u8 sha1_zero_msg_hash[SHA1_DIGEST_SIZE] = {
+		0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+		0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+		0xaf, 0xd8, 0x07, 0x09
+	};
+	const u8 md5_zero_msg_hash[MD5_DIGEST_SIZE] = {
+		0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+		0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+	};
 
 	switch (rk_digest_size) {
 	case SHA1_DIGEST_SIZE:
-		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+		memcpy(req->result, sha1_zero_msg_hash, rk_digest_size);
 		break;
 	case SHA256_DIGEST_SIZE:
-		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+		memcpy(req->result, sha256_zero_msg_hash, rk_digest_size);
 		break;
 	case MD5_DIGEST_SIZE:
-		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+		memcpy(req->result, md5_zero_msg_hash, rk_digest_size);
 		break;
 	default:
 		return -EINVAL;
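These constants are the standard digests of the empty message; defining local copies removes the dependency on the kernel's exported sha1/sha256/md5_zero_message_hash symbols. They are easy to cross-check from userspace; a quick verification sketch for the SHA-256 value, assuming OpenSSL's libcrypto is available (build with -lcrypto):

#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char expected[SHA256_DIGEST_LENGTH] = {
		0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
		0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
		0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
		0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
	};
	unsigned char md[SHA256_DIGEST_LENGTH];

	/* SHA-256 of the zero-length message. */
	SHA256((const unsigned char *)"", 0, md);

	printf("sha256(\"\") matches table: %s\n",
	       memcmp(md, expected, sizeof(md)) == 0 ? "yes" : "no");
	return 0;
}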


@@ -66,7 +66,7 @@ static int rk_get_bc(u32 algo, u32 mode, u32 *bc_val)
 	switch (algo) {
 	case CIPHER_ALGO_DES3_EDE:
 		*bc_val |= RK_CRYPTO_TDES_SELECT;
-		/* fall through */
+		fallthrough;
 	case CIPHER_ALGO_DES:
 		if (mode == CIPHER_MODE_ECB)
 			*bc_val = 0;
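On 5.10 the kernel builds with -Wimplicit-fallthrough, and the old /* fall through */ comment no longer suppresses the warning; the fallthrough pseudo-keyword (an __attribute__((__fallthrough__)) statement) does. Outside the kernel the same idiom looks roughly like this (minimal sketch for GCC/Clang; the algorithm values are invented):

#include <stdio.h>

/* The kernel defines this in its compiler-attribute headers. */
#if defined(__GNUC__) || defined(__clang__)
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)	/* portable no-op */
#endif

static int classify(int algo)
{
	int val = 0;

	switch (algo) {
	case 3:				/* DES3_EDE-like case */
		val |= 0x100;		/* select TDES ... */
		fallthrough;		/* ...then share the DES handling */
	case 1:				/* DES-like case */
		val |= 0x1;
		break;
	default:
		val = -1;
	}
	return val;
}

int main(void)
{
	printf("algo 3 -> 0x%x, algo 1 -> 0x%x\n", classify(3), classify(1));
	return 0;
}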
@@ -269,7 +269,6 @@ static int rk_ablk_start(struct rk_crypto_dev *rk_dev)
 	struct skcipher_request *req =
 		skcipher_request_cast(rk_dev->async_req);
 	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);
-	unsigned long flags;
 	int err = 0;
 
 	alg_ctx->left_bytes = req->cryptlen;
@@ -281,10 +280,9 @@ static int rk_ablk_start(struct rk_crypto_dev *rk_dev)
 	alg_ctx->req_dst = req->dst;
 	alg_ctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
 
-	spin_lock_irqsave(&rk_dev->lock, flags);
 	rk_ablk_hw_init(rk_dev);
 	err = rk_set_data_start(rk_dev);
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	return err;
 }


@@ -58,6 +58,10 @@ static void rk_hash_reset(struct rk_crypto_dev *rk_dev)
 			 pool_timeout_us);
 
 	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0xffff0000);
+
+	/* clear dma int status */
+	tmp = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
+	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, tmp);
 }
 
 static int rk_crypto_irq_handle(int irq, void *dev_id)
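The read-then-write-back sequence here (and in the three matching reset hunks below) is the usual idiom for write-1-to-clear (W1C) status registers: reading captures whichever bits are pending, and writing that exact value back acknowledges only those bits. A tiny simulation of a W1C register (the register model is invented for the sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t dma_int_st = 0x05;	/* two interrupt bits pending */

static uint32_t reg_read(void)
{
	return dma_int_st;
}

/* W1C semantics: writing 1 to a bit clears it; writing 0 leaves it. */
static void reg_write(uint32_t val)
{
	dma_int_st &= ~val;
}

int main(void)
{
	uint32_t tmp;

	/* clear dma int status -- same shape as the reset paths above */
	tmp = reg_read();
	reg_write(tmp);

	printf("status after ack: 0x%08x\n", dma_int_st);	/* 0 */
	return 0;
}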


@@ -160,7 +160,7 @@ static int rk_rsa_calc(struct akcipher_request *req, bool encypt)
 		goto exit;
 
 	out = rk_bn_alloc(key_byte_size);
-	if (!in)
+	if (!out)
 		goto exit;
 
 	tmp_buf = kzalloc(key_byte_size, GFP_KERNEL);
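A classic copy-paste slip: the allocation of out was checked through the earlier in pointer, so an out failure went unnoticed until a later NULL dereference. Checking each pointer the moment it is allocated, with a single cleanup path, keeps paired allocations honest, as in this reduced sketch (names invented):

#include <stdio.h>
#include <stdlib.h>

static int rsa_calc_demo(size_t key_byte_size)
{
	int ret = -1;
	unsigned char *in = NULL, *out = NULL;

	in = malloc(key_byte_size);
	if (!in)
		goto exit;

	out = malloc(key_byte_size);
	if (!out)		/* check the pointer just allocated */
		goto exit;

	/* ... modular exponentiation would go here ... */
	ret = 0;

exit:
	free(out);		/* free(NULL) is a no-op */
	free(in);
	return ret;
}

int main(void)
{
	printf("demo: %d\n", rsa_calc_demo(256));
	return 0;
}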


@@ -197,6 +197,10 @@ static void rk_cipher_reset(struct rk_crypto_dev *rk_dev)
 			 pool_timeout_us);
 
 	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0xffff0000);
+
+	/* clear dma int status */
+	tmp = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
+	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, tmp);
 }
 
 static void rk_crypto_complete(struct crypto_async_request *base, int err)


@@ -63,6 +63,10 @@ static void rk_hash_reset(struct rk_crypto_dev *rk_dev)
 			 pool_timeout_us);
 
 	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0xffff0000);
+
+	/* clear dma int status */
+	tmp = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
+	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, tmp);
 }
 
 static int rk_hash_mid_data_store(struct rk_crypto_dev *rk_dev, struct rk_hash_mid_data *mid_data)


@@ -196,6 +196,10 @@ static void rk_cipher_reset(struct rk_crypto_dev *rk_dev)
 			 pool_timeout_us);
 
	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0xffff0000);
+
+	/* clear dma int status */
+	tmp = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
+	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, tmp);
 }
 
 static void rk_crypto_complete(struct crypto_async_request *base, int err)