linux-loongson/drivers/block/zram/zcomp.c
Sergey Senozhatsky e74a1c6a8e zram: pass buffer offset to zcomp_available_show()
In most cases zcomp_available_show() is the only function emitting
into the buffer of a sysfs read() handler, so it assumes that it has
a whole PAGE_SIZE buffer to work with.
There is an exception, however: recomp_algorithm_show().

recomp_algorithm_show() prefixes the buffer with a priority number
before passing it to zcomp_available_show(), so the latter can no
longer assume a full PAGE_SIZE and must account for the bytes that
recomp_algorithm_show() has already written. Therefore, pass the
buffer offset to zcomp_available_show().

Also convert it to use sysfs_emit_at(), to stay aligned
with the rest of zram's sysfs read() handlers.

In practice we never come close to using the whole PAGE_SIZE buffer,
so this is not a critical bug, but it is still worth fixing.
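
For illustration, the intended caller pattern looks roughly like the
sketch below (names such as prio and comp_algs[] only stand in for
whatever recomp_algorithm_show() actually uses):

	ssize_t at = 0;

	/* prefix emitted by recomp_algorithm_show() itself */
	at += sysfs_emit_at(buf, at, "#%d: ", prio);
	/* continue emitting at the current offset, not at offset 0 */
	at = zcomp_available_show(comp_algs[prio], buf, at);
	return at;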

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Link: https://lore.kernel.org/r/20250627071840.1394242-1-senozhatsky@chromium.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-07-03 19:56:51 -06:00

// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
#include <linux/sysfs.h>
#include "zcomp.h"
#include "backend_lzo.h"
#include "backend_lzorle.h"
#include "backend_lz4.h"
#include "backend_lz4hc.h"
#include "backend_zstd.h"
#include "backend_deflate.h"
#include "backend_842.h"
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};

static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->ops->destroy_ctx(&zstrm->ctx);
	vfree(zstrm->local_copy);
	vfree(zstrm->buffer);
	zstrm->buffer = NULL;
}

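/* Set up the backend compression context and per-stream scratch buffers. */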
static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	int ret;

	ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
	if (ret)
		return ret;

	zstrm->local_copy = vzalloc(PAGE_SIZE);
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
	 * case when compressed size is larger than the original one
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (!zstrm->buffer || !zstrm->local_copy) {
		zcomp_strm_free(comp, zstrm);
		return -ENOMEM;
	}
	return 0;
}

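/* Find a compiled-in backend by name; returns NULL if there is no match. */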
static const struct zcomp_ops *lookup_backend_ops(const char *comp)
{
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}

/*
 * Show available compressors, with the currently selected one in square
 * brackets. @at is the offset within @buf at which to start emitting, so
 * that callers such as recomp_algorithm_show() can prepend their own data.
 */
ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
		if (!strcmp(comp, backends[i]->name)) {
			at += sysfs_emit_at(buf, at, "[%s] ",
					    backends[i]->name);
		} else {
			at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
		}
	}

	at += sysfs_emit_at(buf, at, "\n");
	return at;
}

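/* Get a locked per-CPU stream for the calling task; pair with zcomp_stream_put(). */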
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	for (;;) {
		struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);

		/*
		 * Inspired by zswap
		 *
		 * stream is returned with ->mutex locked which prevents
		 * cpu_dead() from releasing this stream under us, however
		 * there is still a race window between raw_cpu_ptr() and
		 * mutex_lock(), during which we could have been migrated
		 * from a CPU that has already destroyed its stream. If
		 * so then unlock and re-try on the current CPU.
		 */
		mutex_lock(&zstrm->lock);
		if (likely(zstrm->buffer))
			return zstrm;
		mutex_unlock(&zstrm->lock);
	}
}

void zcomp_stream_put(struct zcomp_strm *zstrm)
{
	mutex_unlock(&zstrm->lock);
}

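/*
 * Compress one PAGE_SIZE page from @src into zstrm->buffer; on success
 * *dst_len is set to the compressed length.
 */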
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	struct zcomp_req req = {
		.src = src,
		.dst = zstrm->buffer,
		.src_len = PAGE_SIZE,
		.dst_len = 2 * PAGE_SIZE,
	};
	int ret;

	might_sleep();

	ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
	if (!ret)
		*dst_len = req.dst_len;
	return ret;
}

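/* Decompress @src_len bytes from @src into @dst, which must hold PAGE_SIZE bytes. */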
int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	struct zcomp_req req = {
		.src = src,
		.dst = dst,
		.src_len = src_len,
		.dst_len = PAGE_SIZE,
	};

	might_sleep();

	return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}

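/* CPU hotplug 'prepare' callback: allocate and initialize the stream for an incoming CPU. */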
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
	int ret;

	ret = zcomp_strm_init(comp, zstrm);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

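/*
 * CPU hotplug 'dead' callback: free the departing CPU's stream under its
 * lock so that zcomp_stream_get() can detect the teardown.
 */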
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);

	mutex_lock(&zstrm->lock);
	zcomp_strm_free(comp, zstrm);
	mutex_unlock(&zstrm->lock);
	return 0;
}

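/*
 * Allocate per-CPU streams, set up the backend parameters and register
 * the instance with the CPU hotplug state machine.
 */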
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret, cpu;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}

void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}

struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
	struct zcomp *comp;
	int error;

	/*
	 * The backends array has a sentinel NULL value, so the minimum
	 * size is 1. In order to be valid, the array, apart from the
	 * sentinel NULL element, should have at least one compression
	 * backend selected.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->ops = lookup_backend_ops(alg);
	if (!comp->ops) {
		kfree(comp);
		return ERR_PTR(-EINVAL);
	}

	error = zcomp_init(comp, params);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}