
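/* per-target data recorded when the audited task sends signals to other processes */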
struct audit_aux_data_pids {
	struct audit_aux_data d;
	pid_t target_pid[AUDIT_AUX_PIDS];
	kuid_t target_auid[AUDIT_AUX_PIDS];
	kuid_t target_uid[AUDIT_AUX_PIDS];
	unsigned int target_sessionid[AUDIT_AUX_PIDS];
	u32 target_sid[AUDIT_AUX_PIDS];
	char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
	int pid_count;
};

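/* file capabilities and the old/new process capability sets captured across an execve */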
struct audit_aux_data_bprm_fcaps {
	struct audit_aux_data d;
	struct audit_cap_data fcap;
	unsigned int fcap_ver;
	struct audit_cap_data old_pcap;
	struct audit_cap_data new_pcap;
};

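/* one node of the chunk-reference list described in the block comment further down */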
struct audit_tree_refs {
	struct audit_tree_refs *next;
	struct audit_chunk *c[31];
};

struct audit_nfcfgop_tab {
	enum audit_nfcfgop op;
	const char *s;
};

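/* maps each audit_nfcfgop value to the operation name logged for netfilter configuration changes */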
static const struct audit_nfcfgop_tab audit_nfcfgs[] = {
	{ AUDIT_XT_OP_REGISTER, "xt_register" },
	{ AUDIT_XT_OP_REPLACE, "xt_replace" },
	{ AUDIT_XT_OP_UNREGISTER, "xt_unregister" },
	{ AUDIT_NFT_OP_TABLE_REGISTER, "nft_register_table" },
	{ AUDIT_NFT_OP_TABLE_UNREGISTER, "nft_unregister_table" },
	{ AUDIT_NFT_OP_CHAIN_REGISTER, "nft_register_chain" },
	{ AUDIT_NFT_OP_CHAIN_UNREGISTER, "nft_unregister_chain" },
	{ AUDIT_NFT_OP_RULE_REGISTER, "nft_register_rule" },
	{ AUDIT_NFT_OP_RULE_UNREGISTER, "nft_unregister_rule" },
	{ AUDIT_NFT_OP_SET_REGISTER, "nft_register_set" },
	{ AUDIT_NFT_OP_SET_UNREGISTER, "nft_unregister_set" },
	{ AUDIT_NFT_OP_SETELEM_REGISTER, "nft_register_setelem" },
	{ AUDIT_NFT_OP_SETELEM_UNREGISTER, "nft_unregister_setelem" },
	{ AUDIT_NFT_OP_GEN_REGISTER, "nft_register_gen" },
	{ AUDIT_NFT_OP_OBJ_REGISTER, "nft_register_obj" },
	{ AUDIT_NFT_OP_OBJ_UNREGISTER, "nft_unregister_obj" },
	{ AUDIT_NFT_OP_OBJ_RESET, "nft_reset_obj" },
	{ AUDIT_NFT_OP_FLOWTABLE_REGISTER, "nft_register_flowtable" },
	{ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, "nft_unregister_flowtable" },
	{ AUDIT_NFT_OP_INVALID, "nft_invalid" },
};

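/*
 * Check whether the syscall recorded in @ctx matches the read/write/attr/exec
 * permission @mask of an audit filter rule, based on the syscall's class.
 */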
static int audit_match_perm(struct audit_context *ctx, int mask)
{
	unsigned n;

	if (unlikely(!ctx))
		return 0;
	n = ctx->major;
	switch (audit_classify_syscall(ctx->arch, n)) {
	case AUDITSC_NATIVE:
		if ((mask & AUDIT_PERM_WRITE) &&
		    audit_match_class(AUDIT_CLASS_WRITE, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		    audit_match_class(AUDIT_CLASS_READ, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		    audit_match_class(AUDIT_CLASS_CHATTR, n))
			return 1;
		return 0;
	case AUDITSC_COMPAT: /* 32bit on biarch */
		if ((mask & AUDIT_PERM_WRITE) &&
		    audit_match_class(AUDIT_CLASS_WRITE_32, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		    audit_match_class(AUDIT_CLASS_READ_32, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		    audit_match_class(AUDIT_CLASS_CHATTR_32, n))
			return 1;
		return 0;
	case AUDITSC_OPEN:
		return mask & ACC_MODE(ctx->argv[1]);
	case AUDITSC_OPENAT:
		return mask & ACC_MODE(ctx->argv[2]);
	case AUDITSC_SOCKETCALL:
		return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);
	case AUDITSC_EXECVE:
		return mask & AUDIT_PERM_EXEC;
	case AUDITSC_OPENAT2:
		return mask & ACC_MODE((u32)ctx->openat2.flags);
	default:
		return 0;
	}
}

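/* return 1 if any collected pathname has the file type (S_IFMT bits) given by @val */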
static int audit_match_filetype(struct audit_context *ctx, int val)
{
	struct audit_names *n;
	umode_t mode = (umode_t)val;

	if (unlikely(!ctx))
		return 0;

	list_for_each_entry(n, &ctx->names_list, list) {
		if ((n->ino != AUDIT_INO_UNSET) &&
		    ((n->mode & S_IFMT) == mode))
			return 1;
	}

	return 0;
}

/*
 * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
 * ->first_trees points to its beginning, ->trees - to the current end of data.
 * ->tree_count is the number of free entries in array pointed to by ->trees.
 * Original condition is (NULL, NULL, 0); as soon as it grows we never revert
 * to NULL, "empty" becomes (p, p, 31) afterwards. We don't shrink the list
 * (and seriously, it's going to remain 1-element for almost any setup) until
 * we free context itself. References in it _are_ dropped - at the same time
 * we free/drop aux stuff.
 */

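/* mark the context so that a record is emitted at syscall exit */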
static void audit_set_auditable(struct audit_context *ctx)
{
	if (!ctx->prio) {
		ctx->prio = 1;
		ctx->current_state = AUDIT_STATE_RECORD;
	}
}

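/*
 * Stash a chunk reference in the current array, or in the next preallocated
 * one when the current array is full; returns 0 if there is no room left and
 * the caller must grow the chain first.
 */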
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
	struct audit_tree_refs *p = ctx->trees;
	int left = ctx->tree_count;

	if (likely(left)) {
		p->c[--left] = chunk;
		ctx->tree_count = left;
		return 1;
	}
	if (!p)
		return 0;
	p = p->next;
	if (p) {
		p->c[30] = chunk;
		ctx->trees = p;
		ctx->tree_count = 30;
		return 1;
	}
	return 0;
}

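/* append a fresh array of chunk pointers to the chain; returns 0 on allocation failure */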
static int grow_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *p = ctx->trees;

	ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
	if (!ctx->trees) {
		ctx->trees = p;
		return 0;
	}
	if (p)
		p->next = ctx->trees;
	else
		ctx->first_trees = ctx->trees;
	ctx->tree_count = 31;
	return 1;
}

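/*
 * Drop the chunk references collected after the (@p, @count) snapshot and
 * rewind ->trees/->tree_count back to that point.
 */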
static void unroll_tree_refs(struct audit_context *ctx,
			     struct audit_tree_refs *p, int count)
{
	struct audit_tree_refs *q;
	int n;

	if (!p) {
		/* we started with empty chain */
		p = ctx->first_trees;
		count = 31;
		/* if the very first allocation has failed, nothing to do */
		if (!p)
			return;
	}
	n = count;
	for (q = p; q != ctx->trees; q = q->next, n = 31) {
		while (n--) {
			audit_put_chunk(q->c[n]);
			q->c[n] = NULL;
		}
	}
	while (n-- > ctx->tree_count) {
		audit_put_chunk(q->c[n]);
		q->c[n] = NULL;
	}
	ctx->trees = p;
	ctx->tree_count = count;
}

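/* free every array in the chain of chunk-pointer blocks */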
static void free_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *p, *q;

	for (p = ctx->first_trees; p; p = q) {
		q = p->next;
		kfree(p);
	}
}

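/* return 1 if any chunk stored in this context belongs to @tree */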
static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
	struct audit_tree_refs *p;
	int n;

	if (!tree)
		return 0;
	/* full ones */
	for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
		for (n = 0; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
	/* partial */
	if (p) {
		for (n = ctx->tree_count; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
	return 0;
}

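/*
 * Compare @uid with the uid selected by audit field @f, checking either the
 * single @name entry or each name collected in @ctx.
 */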
static int audit_compare_uid(kuid_t uid,
			     struct audit_names *name,
			     struct audit_field *f,
			     struct audit_context *ctx)
{
	struct audit_names *n;
	int rc;
