linux-loongson/drivers/block/zram/zcomp.h
Sergey Senozhatsky e74a1c6a8e zram: pass buffer offset to zcomp_available_show()
In most cases zcomp_available_show() is the only emitting
function that is called from a sysfs read() handler, so it
assumes that there is a whole PAGE_SIZE buffer to work with.
There is an exception, however: recomp_algorithm_show().

In recomp_algorithm_show() we prepend the buffer with
priority number before we pass it to zcomp_available_show(),
so it cannot assume PAGE_SIZE anymore and must take
recomp_algorithm_show() modifications into consideration.
Therefore we need to pass buffer offset to zcomp_available_show().

Also convert it to use sysfs_emit_at(), to stay aligned
with the rest of zram's sysfs read() handlers.

In practice we never even come close to using the whole PAGE_SIZE
buffer, so this is not a critical bug, but it is still worth fixing.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Link: https://lore.kernel.org/r/20250627071840.1394242-1-senozhatsky@chromium.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-07-03 19:56:51 -06:00

97 lines
2.4 KiB
C

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
#include <linux/mutex.h>
/* Sentinel: the parameter was not explicitly configured by the user */
#define ZCOMP_PARAM_NOT_SET INT_MIN
/* Deflate-specific backend tunables (see zcomp_params union below) */
struct deflate_params {
s32 winbits; /* deflate window bits; presumably ZCOMP_PARAM_NOT_SET when unset — TODO confirm against backend */
};
/*
 * Immutable driver (backend) parameters. The driver may attach private
 * data to it (e.g. driver representation of the dictionary, etc.).
 *
 * This data is kept per-comp and is shared among execution contexts.
 */
struct zcomp_params {
void *dict; /* raw dictionary data, if any */
size_t dict_sz; /* size of @dict in bytes */
s32 level; /* compression level; presumably ZCOMP_PARAM_NOT_SET when unset */
union {
struct deflate_params deflate; /* algorithm-specific parameters */
};
void *drv_data; /* backend-private data attached by the driver */
};
/*
 * Run-time driver context - scratch buffers, etc. It is modified during
 * request execution (compression/decompression), cannot be shared, so
 * it's in per-CPU area.
 */
struct zcomp_ctx {
void *context; /* backend-private run-time state */
};
/*
 * Per-CPU compression stream (see zcomp.stream): working buffers plus the
 * backend run-time context used while executing a request.
 */
struct zcomp_strm {
struct mutex lock; /* serializes use of this stream (get/put) */
/* compression buffer */
void *buffer;
/* local copy of handle memory */
void *local_copy;
struct zcomp_ctx ctx; /* backend run-time context, see above */
};
/* A single (de)compression request passed to the backend ops */
struct zcomp_req {
const unsigned char *src; /* input buffer */
const size_t src_len; /* input length in bytes (immutable for the request) */
unsigned char *dst; /* output buffer */
size_t dst_len; /* NOTE(review): presumably in: dst capacity, out: bytes produced — confirm in zcomp.c */
};
/*
 * Compression backend (driver) operations. @name identifies the
 * algorithm; the remaining hooks manage parameters and per-context
 * state and perform the actual (de)compression.
 */
struct zcomp_ops {
/* compress/decompress one request; return 0 on success — TODO confirm error convention */
int (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
struct zcomp_req *req);
int (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
struct zcomp_req *req);
/* allocate/free backend run-time state for one execution context */
int (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx);
void (*destroy_ctx)(struct zcomp_ctx *ctx);
/* one-time setup/teardown of the shared, immutable parameters */
int (*setup_params)(struct zcomp_params *params);
void (*release_params)(struct zcomp_params *params);
const char *name; /* algorithm name, matched by zcomp_available_algorithm() */
};
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm __percpu *stream; /* per-CPU streams, see zcomp_strm */
const struct zcomp_ops *ops; /* selected backend's operations */
struct zcomp_params *params; /* shared immutable backend parameters */
struct hlist_node node; /* CPU-hotplug state-machine linkage */
};
/* CPU-hotplug callbacks: set up / tear down the per-CPU stream for @cpu */
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
/*
 * Emit the list of available algorithms into @buf, starting at offset
 * @at (sysfs_emit_at() style), for a sysfs read() handler; @comp is the
 * currently selected algorithm. Returns the number of bytes emitted —
 * NOTE(review): confirm exact return semantics against zcomp.c.
 */
ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at);
/* true if @comp names a supported algorithm */
bool zcomp_available_algorithm(const char *comp);
/* create/destroy a compression frontend for algorithm @alg */
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
/* acquire/release a (locked) per-CPU stream for request execution */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
void zcomp_stream_put(struct zcomp_strm *zstrm);
/*
 * (De)compress via an acquired stream. NOTE(review): @dst_len on
 * compress is presumably in/out (capacity in, produced size out) —
 * confirm in zcomp.c.
 */
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
const void *src, unsigned int *dst_len);
int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
const void *src, unsigned int src_len, void *dst);
#endif /* _ZCOMP_H_ */